applied-ai-018 committed
Commit 384088e (verified) · Parent(s): 8171625

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__init__.py +127 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/configuration_blip.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/convert_blip_original_pytorch_to_hf.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/configuration_blip.py +365 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/convert_blip_original_pytorch_to_hf.py +191 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py +312 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py +1433 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_blip_text.py +945 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py +1701 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip_text.py +1122 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/blip/processing_blip.py +150 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py +82 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py +106 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py +693 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +899 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +663 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__init__.py +81 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/configuration_grounding_dino.py +301 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py +491 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/image_processing_grounding_dino.py +1511 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/modeling_grounding_dino.py +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/processing_grounding_dino.py +228 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py +74 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py +262 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py +185 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py +596 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py +1956 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py +348 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__init__.py +120 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/modeling_layoutlm.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/modeling_tf_layoutlm.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/tokenization_layoutlm.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/tokenization_layoutlm_fast.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/configuration_layoutlm.py +198 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/modeling_layoutlm.py +1368 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__init__.py ADDED
@@ -0,0 +1,127 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_tf_available,
+    is_torch_available,
+    is_vision_available,
+)
+
+
+_import_structure = {
+    "configuration_blip": [
+        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "BlipConfig",
+        "BlipTextConfig",
+        "BlipVisionConfig",
+    ],
+    "processing_blip": ["BlipProcessor"],
+}
+
+try:
+    if not is_vision_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
+
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_blip"] = [
+        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "BlipModel",
+        "BlipPreTrainedModel",
+        "BlipForConditionalGeneration",
+        "BlipForQuestionAnswering",
+        "BlipVisionModel",
+        "BlipTextModel",
+        "BlipForImageTextRetrieval",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_blip"] = [
+        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "TFBlipModel",
+        "TFBlipPreTrainedModel",
+        "TFBlipForConditionalGeneration",
+        "TFBlipForQuestionAnswering",
+        "TFBlipVisionModel",
+        "TFBlipTextModel",
+        "TFBlipForImageTextRetrieval",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
+    from .processing_blip import BlipProcessor
+
+    try:
+        if not is_vision_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .image_processing_blip import BlipImageProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_blip import (
+            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+            BlipForConditionalGeneration,
+            BlipForImageTextRetrieval,
+            BlipForQuestionAnswering,
+            BlipModel,
+            BlipPreTrainedModel,
+            BlipTextModel,
+            BlipVisionModel,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_blip import (
+            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+            TFBlipForConditionalGeneration,
+            TFBlipForImageTextRetrieval,
+            TFBlipForQuestionAnswering,
+            TFBlipModel,
+            TFBlipPreTrainedModel,
+            TFBlipTextModel,
+            TFBlipVisionModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
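The `_LazyModule` indirection above means that importing `transformers.models.blip` does not eagerly pull in PyTorch, TensorFlow, or the vision dependencies; the heavy submodules are only loaded when one of the exported names is first accessed. A minimal sketch of how this surface is typically consumed (assuming `transformers`, `torch`, and `Pillow` are installed, using the `Salesforce/blip-vqa-base` checkpoint referenced throughout these files; `photo.jpg` is a placeholder for any local RGB image):

```python
# Hedged usage sketch for the lazily exported BLIP classes registered above.
from PIL import Image

from transformers import BlipForQuestionAnswering, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

image = Image.open("photo.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, text="how many dogs are in the picture?", return_tensors="pt")
answer_ids = model.generate(**inputs)
print(processor.decode(answer_ids[0], skip_special_tokens=True))
```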
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.89 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/configuration_blip.cpython-310.pyc ADDED
Binary file (14.3 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/convert_blip_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (4.72 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/image_processing_blip.cpython-310.pyc ADDED
Binary file (13.2 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip.cpython-310.pyc ADDED
Binary file (48.5 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_blip_text.cpython-310.pyc ADDED
Binary file (27.5 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip.cpython-310.pyc ADDED
Binary file (54.6 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/modeling_tf_blip_text.cpython-310.pyc ADDED
Binary file (32.3 kB)

llmeval-env/lib/python3.10/site-packages/transformers/models/blip/__pycache__/processing_blip.cpython-310.pyc ADDED
Binary file (4.35 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/configuration_blip.py ADDED
@@ -0,0 +1,365 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Blip model configuration"""
+
+import os
+from typing import Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class BlipTextConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BlipTextModel`]. It is used to instantiate a BLIP
+    text model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the `BlipText` used by the [base
+    architectures](https://huggingface.co/Salesforce/blip-vqa-base).
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 30524):
+            Vocabulary size of the `Blip` text model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`BlipModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        encoder_hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers from the vision model.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 8):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        bos_token_id (`int`, *optional*, defaults to 30522):
+            The id of the `beginning-of-sequence` token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the `end-of-sequence` token.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            The id of the `padding` token.
+        sep_token_id (`int`, *optional*, defaults to 102):
+            The id of the `separator` token.
+        is_decoder (`bool`, *optional*, defaults to `True`):
+            Whether the model is used as a decoder.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+        label_smoothing (float, *optional*):
+            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets
+            become a mixture of the original ground truth and a uniform distribution as described in
+            `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
+
+    Example:
+
+    ```python
+    >>> from transformers import BlipTextConfig, BlipTextModel
+
+    >>> # Initializing a BlipTextConfig with Salesforce/blip-vqa-base style configuration
+    >>> configuration = BlipTextConfig()
+
+    >>> # Initializing a BlipTextModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+    >>> model = BlipTextModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "blip_text_model"
+
+    def __init__(
+        self,
+        vocab_size=30524,
+        hidden_size=768,
+        encoder_hidden_size=768,
+        intermediate_size=3072,
+        projection_dim=768,
+        num_hidden_layers=12,
+        num_attention_heads=8,
+        max_position_embeddings=512,
+        hidden_act="gelu",
+        layer_norm_eps=1e-12,
+        hidden_dropout_prob=0.0,
+        attention_probs_dropout_prob=0.0,
+        initializer_range=0.02,
+        bos_token_id=30522,
+        eos_token_id=2,
+        pad_token_id=0,
+        sep_token_id=102,
+        is_decoder=True,
+        use_cache=True,
+        label_smoothing=0.0,
+        **kwargs,
+    ):
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            sep_token_id=sep_token_id,
+            **kwargs,
+        )
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.encoder_hidden_size = encoder_hidden_size
+        self.intermediate_size = intermediate_size
+        self.projection_dim = projection_dim
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.max_position_embeddings = max_position_embeddings
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.is_decoder = is_decoder
+        self.use_cache = use_cache
+        self.label_smoothing = label_smoothing
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+        cls._set_token_in_kwargs(kwargs)
+
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+        # get the text config dict if we are loading from BlipConfig
+        if config_dict.get("model_type") == "blip":
+            config_dict = config_dict["text_config"]
+
+        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+            )
+
+        return cls.from_dict(config_dict, **kwargs)
+
+
+class BlipVisionConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BlipVisionModel`]. It is used to instantiate a
+    BLIP vision model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration defaults will yield a similar configuration to that of the Blip-base
+    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        image_size (`int`, *optional*, defaults to 384):
+            The size (resolution) of each image.
+        patch_size (`int`, *optional*, defaults to 16):
+            The size (resolution) of each patch.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+            The epsilon used by the layer normalization layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        initializer_range (`float`, *optional*, defaults to 1e-10):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+    Example:
+
+    ```python
+    >>> from transformers import BlipVisionConfig, BlipVisionModel
+
+    >>> # Initializing a BlipVisionConfig with Salesforce/blip-vqa-base style configuration
+    >>> configuration = BlipVisionConfig()
+
+    >>> # Initializing a BlipVisionModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+    >>> model = BlipVisionModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "blip_vision_model"
+
+    def __init__(
+        self,
+        hidden_size=768,
+        intermediate_size=3072,
+        projection_dim=512,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        image_size=384,
+        patch_size=16,
+        hidden_act="gelu",
+        layer_norm_eps=1e-5,
+        attention_dropout=0.0,
+        initializer_range=1e-10,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.projection_dim = projection_dim
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.initializer_range = initializer_range
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+        cls._set_token_in_kwargs(kwargs)
+
+        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+        # get the vision config dict if we are loading from BlipConfig
+        if config_dict.get("model_type") == "blip":
+            config_dict = config_dict["vision_config"]
+
+        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+            logger.warning(
+                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+            )
+
+        return cls.from_dict(config_dict, **kwargs)
+
+
+class BlipConfig(PretrainedConfig):
+    r"""
+    [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate
+    a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
+    a configuration with the defaults will yield a similar configuration to that of the BLIP-base
+    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        text_config (`dict`, *optional*):
+            Dictionary of configuration options used to initialize [`BlipTextConfig`].
+        vision_config (`dict`, *optional*):
+            Dictionary of configuration options used to initialize [`BlipVisionConfig`].
+        projection_dim (`int`, *optional*, defaults to 512):
+            Dimentionality of text and vision projection layers.
+        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+            The inital value of the *logit_scale* paramter. Default is used as per the original BLIP implementation.
+        image_text_hidden_size (`int`, *optional*, defaults to 256):
+            Dimentionality of the hidden state of the image-text fusion layer.
+        label_smoothing (float, optional, *optional*, defaults to 0.0):
+            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets
+            become a mixture of the original ground truth and a uniform distribution as described in
+            `Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
+        kwargs (*optional*):
+            Dictionary of keyword arguments.
+
+    Example:
+
+    ```python
+    >>> from transformers import BlipConfig, BlipModel
+
+    >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
+    >>> configuration = BlipConfig()
+
+    >>> # Initializing a BlipPModel (with random weights) from the Salesforce/blip-vqa-base style configuration
+    >>> model = BlipModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+
+    >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig
+
+    >>> # Initializing a BLIPText and BLIPVision configuration
+    >>> config_text = BlipTextConfig()
+    >>> config_vision = BlipVisionConfig()
+
+    >>> config = BlipConfig.from_text_vision_configs(config_text, config_vision)
+    ```"""
+
+    model_type = "blip"
+
+    def __init__(
+        self,
+        text_config=None,
+        vision_config=None,
+        projection_dim=512,
+        logit_scale_init_value=2.6592,
+        image_text_hidden_size=256,
+        label_smoothing=0.0,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        if text_config is None:
+            text_config = {}
+            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
+
+        if vision_config is None:
+            vision_config = {}
+            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")
+
+        self.text_config = BlipTextConfig(**text_config)
+        self.vision_config = BlipVisionConfig(**vision_config)
+
+        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
+
+        self.projection_dim = projection_dim
+        self.logit_scale_init_value = logit_scale_init_value
+        self.initializer_factor = 1.0
+        self.initializer_range = 0.02
+        self.image_text_hidden_size = image_text_hidden_size
+        self.label_smoothing = label_smoothing
+
+    @classmethod
+    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
+        r"""
+        Instantiate a [`BlipConfig`] (or a derived class) from blip text model configuration and blip vision model
+        configuration.
+
+        Returns:
+            [`BlipConfig`]: An instance of a configuration object
+        """
+
+        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
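One behavior worth calling out in `BlipConfig.__init__` above: after both sub-configs are built, `text_config.encoder_hidden_size` is overwritten with `vision_config.hidden_size`, so the text decoder's cross-attention width always matches the vision tower regardless of what the text config specified. A small sketch of the composition path (assuming only that `transformers` is installed; the sizes are arbitrary illustrative values):

```python
# Hedged sketch: composing a BlipConfig from sub-configs, as documented above.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(hidden_size=768, num_hidden_layers=6)
vision_cfg = BlipVisionConfig(hidden_size=512, image_size=224, patch_size=16)

config = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=256)

# __init__ ties the text model's cross-attention width to the vision tower:
assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size == 512
print(config.projection_dim, config.image_text_hidden_size)  # 256 256
```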
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/convert_blip_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,191 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import re
+
+import requests
+import torch
+
+# git clone https://github.com/salesforce/BLIP.git
+from models.blip import blip_decoder
+from models.blip_itm import blip_itm
+from models.blip_vqa import blip_vqa
+from PIL import Image
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+
+from transformers import (
+    BertTokenizer,
+    BlipConfig,
+    BlipForConditionalGeneration,
+    BlipForImageTextRetrieval,
+    BlipForQuestionAnswering,
+)
+
+
+def load_demo_image(image_size, device):
+    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
+    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
+
+    transform = transforms.Compose(
+        [
+            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
+            transforms.ToTensor(),
+            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+        ]
+    )
+    image = transform(raw_image).unsqueeze(0).to(device)
+    return image
+
+
+def rename_key(key):
+    if "visual_encoder" in key:
+        key = re.sub("visual_encoder*", "vision_model.encoder", key)
+    if "blocks" in key:
+        key = re.sub(r"blocks", "layers", key)
+    if "attn" in key:
+        key = re.sub(r"attn", "self_attn", key)
+    if "norm1" in key:
+        key = re.sub(r"norm1", "layer_norm1", key)
+    if "norm2" in key:
+        key = re.sub(r"norm2", "layer_norm2", key)
+    if "encoder.norm" in key:
+        key = re.sub(r"encoder.norm", "post_layernorm", key)
+    if "encoder.patch_embed.proj" in key:
+        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
+
+    if "encoder.pos_embed" in key:
+        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
+    if "encoder.cls_token" in key:
+        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
+
+    if "self_attn" in key:
+        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
+
+    return key
+
+
+@torch.no_grad()
+def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
+    """
+    Copy/paste/tweak model's weights to transformers design.
+    """
+    if config_path is not None:
+        config = BlipConfig.from_pretrained(config_path)
+    else:
+        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
+
+    hf_model = BlipForConditionalGeneration(config).eval()
+
+    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
+
+    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
+    pt_model = pt_model.eval()
+
+    modified_state_dict = pt_model.state_dict()
+    for key in modified_state_dict.copy():
+        value = modified_state_dict.pop(key)
+        renamed_key = rename_key(key)
+        modified_state_dict[renamed_key] = value
+
+    hf_model.load_state_dict(modified_state_dict)
+
+    image_size = 384
+    image = load_demo_image(image_size=image_size, device="cpu")
+    tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+    input_ids = tokenizer(["a picture of"]).input_ids
+
+    out = hf_model.generate(image, input_ids)
+
+    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
+
+    out = hf_model.generate(image)
+
+    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
+
+    if pytorch_dump_folder_path is not None:
+        hf_model.save_pretrained(pytorch_dump_folder_path)
+
+    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
+    model_url = (
+        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
+    )
+
+    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
+    vqa_model.eval()
+
+    modified_state_dict = vqa_model.state_dict()
+    for key in modified_state_dict.copy():
+        value = modified_state_dict.pop(key)
+        renamed_key = rename_key(key)
+        modified_state_dict[renamed_key] = value
+
+    hf_vqa_model = BlipForQuestionAnswering(config)
+
+    hf_vqa_model.load_state_dict(modified_state_dict)
+
+    question = ["How many dogs are in this image?"]
+    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
+
+    answer = hf_vqa_model.generate(question_input_ids, image)
+    print(tokenizer.decode(answer[0]))
+
+    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
+    if pytorch_dump_folder_path is not None:
+        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
+
+    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
+
+    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
+    itm_model.eval()
+
+    modified_state_dict = itm_model.state_dict()
+    for key in modified_state_dict.copy():
+        value = modified_state_dict.pop(key)
+        renamed_key = rename_key(key)
+        modified_state_dict[renamed_key] = value
+
+    hf_itm_model = BlipForImageTextRetrieval(config)
+
+    question = ["A picture of a woman with a dog sitting in a beach"]
+    question_input_ids = tokenizer(
+        question,
+        return_tensors="pt",
+        padding="max_length",
+        truncation=True,
+        max_length=35,
+    ).input_ids
+
+    hf_itm_model.load_state_dict(modified_state_dict)
+    hf_itm_model.eval()
+
+    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
+    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
+
+    assert out[0].item() == 0.2110687494277954
+    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
+
+    if pytorch_dump_folder_path is not None:
+        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+    args = parser.parse_args()
+
+    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
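The conversion above is largely a key-renaming exercise: `rename_key` maps the original BLIP `visual_encoder.*` ViT parameter names onto the `vision_model.encoder.*` layout expected by the Hugging Face modules before `load_state_dict` is called. A quick sketch of what that chain of `re.sub` calls does to a single weight name (self-contained, no checkpoints needed; the sample key is illustrative):

```python
# Hedged sketch: the rename_key substitutions above applied by hand to one ViT weight name.
import re

key = "visual_encoder.blocks.0.attn.proj.weight"
key = re.sub("visual_encoder*", "vision_model.encoder", key)  # visual_encoder -> vision_model.encoder
key = re.sub(r"blocks", "layers", key)                        # blocks -> layers
key = re.sub(r"attn", "self_attn", key)                       # attn -> self_attn
key = re.sub(r"self_attn.proj", "self_attn.projection", key)  # proj -> projection

assert key == "vision_model.encoder.layers.0.self_attn.projection.weight"
```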
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/image_processing_blip.py ADDED
@@ -0,0 +1,312 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for BLIP."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
+from ...image_utils import (
+    OPENAI_CLIP_MEAN,
+    OPENAI_CLIP_STD,
+    ChannelDimension,
+    ImageInput,
+    PILImageResampling,
+    infer_channel_dimension_format,
+    is_scaled_image,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+    validate_kwargs,
+    validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+    import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+class BlipImageProcessor(BaseImageProcessor):
+    r"""
+    Constructs a BLIP image processor.
+
+    Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+            `do_resize` parameter in the `preprocess` method.
+        size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
+            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+            method.
+        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
+            overridden by the `resample` parameter in the `preprocess` method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+            `do_rescale` parameter in the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+            overridden by the `rescale_factor` parameter in the `preprocess` method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+            method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
+        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
+            overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+            Can be overridden by the `image_std` parameter in the `preprocess` method.
+        do_convert_rgb (`bool`, *optional*, defaults to `True`):
+            Whether to convert the image to RGB.
+    """
+
+    model_input_names = ["pixel_values"]
+
+    def __init__(
+        self,
+        do_resize: bool = True,
+        size: Dict[str, int] = None,
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        do_rescale: bool = True,
+        rescale_factor: Union[int, float] = 1 / 255,
+        do_normalize: bool = True,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        do_convert_rgb: bool = True,
+        **kwargs,
+    ) -> None:
+        super().__init__(**kwargs)
+        size = size if size is not None else {"height": 384, "width": 384}
+        size = get_size_dict(size, default_to_square=True)
+
+        self.do_resize = do_resize
+        self.size = size
+        self.resample = resample
+        self.do_rescale = do_rescale
+        self.rescale_factor = rescale_factor
+        self.do_normalize = do_normalize
+        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+        self.do_convert_rgb = do_convert_rgb
+        self._valid_processor_keys = [
+            "images",
+            "do_resize",
+            "size",
+            "resample",
+            "do_rescale",
+            "rescale_factor",
+            "do_normalize",
+            "image_mean",
+            "image_std",
+            "do_convert_rgb",
+            "return_tensors",
+            "data_format",
+            "input_data_format",
+        ]
+
+    # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+    def resize(
+        self,
+        image: np.ndarray,
+        size: Dict[str, int],
+        resample: PILImageResampling = PILImageResampling.BICUBIC,
+        data_format: Optional[Union[str, ChannelDimension]] = None,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Resize an image to `(size["height"], size["width"])`.
+
+        Args:
+            image (`np.ndarray`):
+                Image to resize.
+            size (`Dict[str, int]`):
+                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+            data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the output image. If unset, the channel dimension format of the input
+                image is used. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+        Returns:
+            `np.ndarray`: The resized image.
+        """
+        size = get_size_dict(size)
+        if "height" not in size or "width" not in size:
+            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+        output_size = (size["height"], size["width"])
+        return resize(
+            image,
+            size=output_size,
+            resample=resample,
+            data_format=data_format,
+            input_data_format=input_data_format,
+            **kwargs,
+        )
+
+    def preprocess(
+        self,
+        images: ImageInput,
+        do_resize: Optional[bool] = None,
+        size: Optional[Dict[str, int]] = None,
+        resample: PILImageResampling = None,
+        do_rescale: Optional[bool] = None,
+        rescale_factor: Optional[float] = None,
+        do_normalize: Optional[bool] = None,
+        image_mean: Optional[Union[float, List[float]]] = None,
+        image_std: Optional[Union[float, List[float]]] = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        do_convert_rgb: bool = None,
+        data_format: ChannelDimension = ChannelDimension.FIRST,
+        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        **kwargs,
+    ) -> PIL.Image.Image:
+        """
+        Preprocess an image or batch of images.
+
+        Args:
+            images (`ImageInput`):
+                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+                Whether to resize the image.
+            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Controls the size of the image after `resize`. The shortest edge of the image is resized to
+                `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
+                is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
+                edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
+            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
+            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values between [0 - 1].
+            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+                Whether to normalize the image.
+            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+                Image mean to normalize the image by if `do_normalize` is set to `True`.
+            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+                Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
+            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+                Whether to convert the image to RGB.
+            return_tensors (`str` or `TensorType`, *optional*):
+                The type of tensors to return. Can be one of:
+                - Unset: Return a list of `np.ndarray`.
+                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+                The channel dimension format for the output image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - Unset: Use the channel dimension format of the input image.
+            input_data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format for the input image. If unset, the channel dimension format is inferred
+                from the input image. Can be one of:
+                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+        """
+        do_resize = do_resize if do_resize is not None else self.do_resize
+        resample = resample if resample is not None else self.resample
+        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+        image_mean = image_mean if image_mean is not None else self.image_mean
+        image_std = image_std if image_std is not None else self.image_std
+        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+        size = size if size is not None else self.size
+        size = get_size_dict(size, default_to_square=False)
+
+        images = make_list_of_images(images)
+
+        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+        if not valid_images(images):
+            raise ValueError(
+                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                "torch.Tensor, tf.Tensor or jax.ndarray."
+            )
+
+        validate_preprocess_arguments(
+            do_rescale=do_rescale,
+            rescale_factor=rescale_factor,
+            do_normalize=do_normalize,
+            image_mean=image_mean,
+            image_std=image_std,
+            do_resize=do_resize,
+            size=size,
+            resample=resample,
+        )
+        # PIL RGBA images are converted to RGB
+        if do_convert_rgb:
+            images = [convert_to_rgb(image) for image in images]
+
+        # All transformations expect numpy arrays.
+        images = [to_numpy_array(image) for image in images]
+
+        if is_scaled_image(images[0]) and do_rescale:
+            logger.warning_once(
+                "It looks like you are trying to rescale already rescaled images. If the input"
+                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+            )
+
+        if input_data_format is None:
+            # We assume that all images have the same channel dimension format.
+            input_data_format = infer_channel_dimension_format(images[0])
+
+        if do_resize:
+            images = [
+                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        if do_rescale:
+            images = [
+                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        if do_normalize:
+            images = [
+                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+                for image in images
+            ]
+
+        images = [
+            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+        ]
+
+        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
+
+        return encoded_outputs
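With the defaults above, `preprocess` resizes every image to a fixed 384x384 with bicubic resampling, rescales pixel values by 1/255, and normalizes with the OpenAI CLIP mean/std, so the returned `pixel_values` are directly consumable by the BLIP vision tower. A short sketch of the resulting tensor shape (assuming `transformers`, `torch`, `numpy`, and `Pillow` are installed; the random image stands in for real input):

```python
# Hedged sketch: default BlipImageProcessor pipeline (resize to 384x384, rescale, CLIP-normalize).
import numpy as np
from PIL import Image

from transformers import BlipImageProcessor

processor = BlipImageProcessor()  # defaults: size={"height": 384, "width": 384}
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])
```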
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_blip.py ADDED
@@ -0,0 +1,1433 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BLIP model."""
16
+
17
+ import warnings
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn.functional import normalize
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import (
30
+ ModelOutput,
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
37
+ from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
43
+
44
+
45
+ from ..deprecated._archive_maps import BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
46
+
47
+
48
+ # Copied from transformers.models.clip.modeling_clip.contrastive_loss
49
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
50
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
51
+
52
+
53
+ # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->blip
54
+ def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
55
+ caption_loss = contrastive_loss(similarity)
56
+ image_loss = contrastive_loss(similarity.t())
57
+ return (caption_loss + image_loss) / 2.0
58
+
59
+
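For illustration only (a standalone sketch, not part of the diff above): the two loss helpers implement the CLIP-style symmetric contrastive objective, averaging a cross-entropy over rows (text-to-image) and columns (image-to-text) of a similarity matrix whose diagonal holds the matching pairs. The batch size below is an arbitrary assumption.

```python
import torch
from torch import nn


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # Cross-entropy where the i-th text is the positive target for the i-th image.
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


# Hypothetical 4x4 similarity matrix for a batch of 4 image-text pairs.
similarity = torch.randn(4, 4)
caption_loss = contrastive_loss(similarity)        # rows: text -> image targets
image_loss = contrastive_loss(similarity.t())      # columns: image -> text targets
print(((caption_loss + image_loss) / 2.0).item())  # the value blip_loss would return
```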
60
+ @dataclass
61
+ class BlipForConditionalGenerationModelOutput(ModelOutput):
62
+ """
63
+ Adapted from the base class for vision model outputs that also contains the image embeddings obtained by pooling the
64
+ last hidden states. This class also adds the loss term from the text decoder.
65
+
66
+ Args:
67
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
68
+ Language modeling loss from the text decoder.
69
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
70
+ Prediction scores of the language modeling head of the text decoder model.
71
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
72
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
73
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
74
+ Sequence of hidden-states at the output of the last layer of the model.
75
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
76
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
77
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
78
+
79
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
80
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
81
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
82
+ sequence_length)`.
83
+
84
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
85
+ heads.
86
+ """
87
+
88
+ loss: Optional[Tuple[torch.FloatTensor]] = None
89
+ logits: Optional[Tuple[torch.FloatTensor]] = None
90
+ image_embeds: Optional[torch.FloatTensor] = None
91
+ last_hidden_state: torch.FloatTensor = None
92
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
93
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
94
+
95
+ @property
96
+ def decoder_logits(self):
97
+ warnings.warn(
98
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
99
+ " Please use the `logits` attribute to retrieve the final output instead.",
100
+ FutureWarning,
101
+ )
102
+ return self.logits
103
+
104
+
105
+ @dataclass
106
+ class BlipTextVisionModelOutput(ModelOutput):
107
+ """
108
+ Adapted from the base class for vision model outputs that also contains the image embeddings obtained by pooling the
109
+ last hidden states. This class also adds the loss term from the text decoder.
110
+
111
+ Args:
112
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
113
+ Language modeling loss from the text decoder.
114
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
115
+ The image embeddings obtained by applying the projection layer to the pooler_output.
116
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
117
+ Sequence of hidden-states at the output of the last layer of the model.
118
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
119
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
120
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
121
+
122
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
123
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
124
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
125
+ sequence_length)`.
126
+
127
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
128
+ heads.
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ image_embeds: Optional[torch.FloatTensor] = None
133
+ last_hidden_state: torch.FloatTensor = None
134
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
135
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
136
+
137
+
138
+ @dataclass
139
+ class BlipImageTextMatchingModelOutput(ModelOutput):
140
+ """
141
+ Adapted from the base class for vision model outputs that also contains the image embeddings obtained by pooling the
142
+ last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
143
+ scores.
144
+
145
+ Args:
146
+ itm_score (`torch.FloatTensor`):
147
+ The image-text similarity scores.
148
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
149
+ Language modeling loss from the text decoder.
150
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
151
+ The image embeddings obtained by applying the projection layer to the pooler_output.
152
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
153
+ Sequence of hidden-states at the output of the last layer of the model.
154
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
155
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
156
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
157
+
158
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
159
+ vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
160
+ Last layer hidden-state of the vision-only branch of the model.
161
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
162
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
163
+ sequence_length)`.
164
+
165
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
166
+ heads.
167
+ question_embeds (`torch.FloatTensor`):
168
+ The question embeddings obtained by the text projection layer.
169
+ """
170
+
171
+ itm_score: Optional[torch.FloatTensor] = None
172
+ loss: Optional[torch.FloatTensor] = None
173
+ image_embeds: Optional[torch.FloatTensor] = None
174
+ last_hidden_state: torch.FloatTensor = None
175
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
176
+ vision_pooler_output: Optional[torch.FloatTensor] = None
177
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
178
+ question_embeds: Optional[Tuple[torch.FloatTensor]] = None
179
+
180
+
181
+ @dataclass
182
+ class BlipOutput(ModelOutput):
183
+ """
184
+ Args:
185
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
186
+ Contrastive loss for image-text similarity.
187
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
188
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
189
+ similarity scores.
190
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
191
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
192
+ similarity scores.
193
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
194
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
195
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
196
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
197
+ text_model_output (`BaseModelOutputWithPooling`):
198
+ The output of the [`BlipTextModel`].
199
+ vision_model_output (`BaseModelOutputWithPooling`):
200
+ The output of the [`BlipVisionModel`].
201
+ """
202
+
203
+ loss: Optional[torch.FloatTensor] = None
204
+ logits_per_image: torch.FloatTensor = None
205
+ logits_per_text: torch.FloatTensor = None
206
+ text_embeds: torch.FloatTensor = None
207
+ image_embeds: torch.FloatTensor = None
208
+ text_model_output: BaseModelOutputWithPooling = None
209
+ vision_model_output: BaseModelOutputWithPooling = None
210
+
211
+ def to_tuple(self) -> Tuple[Any]:
212
+ return tuple(
213
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
214
+ for k in self.keys()
215
+ )
216
+
217
+
218
+ class BlipVisionEmbeddings(nn.Module):
219
+ def __init__(self, config: BlipVisionConfig):
220
+ super().__init__()
221
+ self.config = config
222
+ self.embed_dim = config.hidden_size
223
+ self.image_size = config.image_size
224
+ self.patch_size = config.patch_size
225
+
226
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
227
+
228
+ self.patch_embedding = nn.Conv2d(
229
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
230
+ )
231
+
232
+ self.num_patches = (self.image_size // self.patch_size) ** 2
233
+ self.num_positions = self.num_patches + 1
234
+
235
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
236
+
237
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
238
+ batch_size = pixel_values.shape[0]
239
+ target_dtype = self.patch_embedding.weight.dtype
240
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
241
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
242
+
243
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
244
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
245
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
246
+ return embeddings
247
+
248
+
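For illustration only (standalone sketch with assumed toy sizes, not part of the diff): `BlipVisionEmbeddings` is a ViT-style patchifier, so `num_patches` is simply `(image_size // patch_size) ** 2` and one extra position is reserved for the class embedding.

```python
import torch

image_size, patch_size, hidden_size = 224, 16, 64     # assumed toy values
num_patches = (image_size // patch_size) ** 2          # 14 * 14 = 196
num_positions = num_patches + 1                        # + 1 for the class embedding

patch_embedding = torch.nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)
pixel_values = torch.randn(2, 3, image_size, image_size)
patches = patch_embedding(pixel_values).flatten(2).transpose(1, 2)
print(patches.shape)   # torch.Size([2, 196, 64])
print(num_positions)   # 197 position embeddings: one per patch plus [CLS]
```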
249
+ # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Blip
250
+ class BlipTextEmbeddings(nn.Module):
251
+ def __init__(self, config: BlipTextConfig):
252
+ super().__init__()
253
+ embed_dim = config.hidden_size
254
+
255
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
256
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
257
+
258
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
259
+ self.register_buffer(
260
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
261
+ )
262
+
263
+ def forward(
264
+ self,
265
+ input_ids: Optional[torch.LongTensor] = None,
266
+ position_ids: Optional[torch.LongTensor] = None,
267
+ inputs_embeds: Optional[torch.FloatTensor] = None,
268
+ ) -> torch.Tensor:
269
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
270
+
271
+ if position_ids is None:
272
+ position_ids = self.position_ids[:, :seq_length]
273
+
274
+ if inputs_embeds is None:
275
+ inputs_embeds = self.token_embedding(input_ids)
276
+
277
+ position_embeddings = self.position_embedding(position_ids)
278
+ embeddings = inputs_embeds + position_embeddings
279
+
280
+ return embeddings
281
+
282
+
283
+ class BlipAttention(nn.Module):
284
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
285
+
286
+ def __init__(self, config):
287
+ super().__init__()
288
+ self.config = config
289
+ self.embed_dim = config.hidden_size
290
+ self.num_heads = config.num_attention_heads
291
+ self.head_dim = self.embed_dim // self.num_heads
292
+ if self.head_dim * self.num_heads != self.embed_dim:
293
+ raise ValueError(
294
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
295
+ f" {self.num_heads})."
296
+ )
297
+ self.scale = self.head_dim**-0.5
298
+ self.dropout = nn.Dropout(config.attention_dropout)
299
+
300
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
301
+
302
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
303
+
304
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
305
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ head_mask: Optional[torch.Tensor] = None,
311
+ output_attentions: Optional[bool] = False,
312
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
313
+ """Input shape: Batch x Time x Channel"""
314
+
315
+ bsz, tgt_len, embed_dim = hidden_states.size()
316
+
317
+ mixed_qkv = (
318
+ self.qkv(hidden_states)
319
+ .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads)
320
+ .permute(2, 0, 3, 1, 4)
321
+ )
322
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
323
+
324
+ # Take the dot product between "query" and "key" to get the raw attention scores.
325
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
326
+
327
+ attention_scores = attention_scores * self.scale
328
+
329
+ # Normalize the attention scores to probabilities.
330
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
331
+
332
+ # This is actually dropping out entire tokens to attend to, which might
333
+ # seem a bit unusual, but is taken from the original Transformer paper.
334
+ attention_probs = self.dropout(attention_probs)
335
+
336
+ # Mask heads if we want to
337
+ if head_mask is not None:
338
+ attention_probs = attention_probs * head_mask
339
+
340
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
341
+
342
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
343
+ context_layer = context_layer.reshape(new_context_layer_shape)
344
+
345
+ output = self.projection(context_layer)
346
+
347
+ outputs = (output, attention_probs) if output_attentions else (output, None)
348
+
349
+ return outputs
350
+
351
+
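For illustration only (standalone sketch, toy dimensions assumed): the fused `qkv` projection above is split into per-head queries, keys and values purely by reshaping and permuting, after which a standard scaled dot-product attention is applied.

```python
import torch

bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
embed_dim = num_heads * head_dim
qkv = torch.nn.Linear(embed_dim, 3 * embed_dim)

hidden_states = torch.randn(bsz, seq_len, embed_dim)
mixed = qkv(hidden_states).reshape(bsz, seq_len, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = mixed[0], mixed[1], mixed[2]               # each: (bsz, num_heads, seq_len, head_dim)

scores = (q @ k.transpose(-1, -2)) * head_dim**-0.5  # (bsz, num_heads, seq_len, seq_len)
probs = scores.softmax(dim=-1)
context = (probs @ v).permute(0, 2, 1, 3).reshape(bsz, seq_len, embed_dim)
print(context.shape)                                 # torch.Size([2, 5, 32])
```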
352
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip
353
+ class BlipMLP(nn.Module):
354
+ def __init__(self, config):
355
+ super().__init__()
356
+ self.config = config
357
+ self.activation_fn = ACT2FN[config.hidden_act]
358
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
359
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
360
+
361
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
362
+ hidden_states = self.fc1(hidden_states)
363
+ hidden_states = self.activation_fn(hidden_states)
364
+ hidden_states = self.fc2(hidden_states)
365
+ return hidden_states
366
+
367
+
368
+ class BlipEncoderLayer(nn.Module):
369
+ def __init__(self, config: BlipConfig):
370
+ super().__init__()
371
+ self.embed_dim = config.hidden_size
372
+ self.self_attn = BlipAttention(config)
373
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
374
+ self.mlp = BlipMLP(config)
375
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
376
+
377
+ def forward(
378
+ self,
379
+ hidden_states: torch.Tensor,
380
+ attention_mask: torch.Tensor,
381
+ output_attentions: Optional[bool] = False,
382
+ ) -> Tuple[torch.FloatTensor]:
383
+ """
384
+ Args:
385
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
386
+ attention_mask (`torch.FloatTensor`): attention mask of size
387
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
389
+ output_attentions (`bool`, *optional*):
390
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
391
+ returned tensors for more detail.
392
+ """
393
+ residual = hidden_states
394
+
395
+ hidden_states = self.layer_norm1(hidden_states)
396
+ hidden_states, attn_weights = self.self_attn(
397
+ hidden_states=hidden_states,
398
+ head_mask=attention_mask,
399
+ output_attentions=output_attentions,
400
+ )
401
+ hidden_states = hidden_states + residual
402
+ residual = hidden_states
403
+ hidden_states = self.layer_norm2(hidden_states)
404
+ hidden_states = self.mlp(hidden_states)
405
+
406
+ hidden_states = hidden_states + residual
407
+
408
+ outputs = (hidden_states,)
409
+
410
+ if output_attentions:
411
+ outputs += (attn_weights,)
412
+
413
+ return outputs
414
+
415
+
416
+ class BlipPreTrainedModel(PreTrainedModel):
417
+ """
418
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
419
+ models.
420
+ """
421
+
422
+ config_class = BlipConfig
423
+ base_model_prefix = "blip"
424
+ supports_gradient_checkpointing = True
425
+
426
+ def _init_weights(self, module):
427
+ """Initialize the weights"""
428
+ factor = self.config.initializer_range
429
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
430
+ module.weight.data.normal_(mean=0.0, std=factor)
431
+ if hasattr(module, "bias") and module.bias is not None:
432
+ module.bias.data.zero_()
433
+
434
+ if isinstance(module, BlipVisionEmbeddings):
435
+ if hasattr(self.config, "vision_config"):
436
+ factor = self.config.vision_config.initializer_range
437
+ nn.init.trunc_normal_(
438
+ module.position_embedding,
439
+ mean=0.0,
440
+ std=factor,
441
+ )
442
+
443
+ nn.init.trunc_normal_(
444
+ module.class_embedding,
445
+ mean=0.0,
446
+ std=factor,
447
+ )
448
+
449
+ elif isinstance(module, nn.LayerNorm):
450
+ module.bias.data.zero_()
451
+ module.weight.data.fill_(1.0)
452
+ elif isinstance(module, nn.Linear) and module.bias is not None:
453
+ module.bias.data.zero_()
454
+
455
+
456
+ BLIP_START_DOCSTRING = r"""
457
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
458
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
459
+ etc.)
460
+
461
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
462
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
463
+ and behavior.
464
+
465
+ Parameters:
466
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
467
+ Initializing with a config file does not load the weights associated with the model, only the
468
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
469
+ """
470
+
471
+ BLIP_TEXT_INPUTS_DOCSTRING = r"""
472
+ Args:
473
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
474
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
475
+ it.
476
+
477
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
478
+
479
+ [What are input IDs?](../glossary#input-ids)
480
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
481
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
482
+
483
+ - 1 for tokens that are **not masked**,
484
+ - 0 for tokens that are **masked**.
485
+
486
+ [What are attention masks?](../glossary#attention-mask)
487
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
488
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
489
+ config.max_position_embeddings - 1]`.
490
+
491
+ [What are position IDs?](../glossary#position-ids)
492
+ output_attentions (`bool`, *optional*):
493
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
494
+ tensors for more detail.
495
+ output_hidden_states (`bool`, *optional*):
496
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
497
+ more detail.
498
+ return_dict (`bool`, *optional*):
499
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
500
+ """
501
+
502
+ BLIP_VISION_INPUTS_DOCSTRING = r"""
503
+ Args:
504
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
505
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
506
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
507
+ output_attentions (`bool`, *optional*):
508
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
509
+ tensors for more detail.
510
+ output_hidden_states (`bool`, *optional*):
511
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
512
+ more detail.
513
+ return_dict (`bool`, *optional*):
514
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
515
+ """
516
+
517
+ BLIP_INPUTS_DOCSTRING = r"""
518
+ Args:
519
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
520
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
521
+ it.
522
+
523
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
524
+
525
+ [What are input IDs?](../glossary#input-ids)
526
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
527
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
528
+
529
+ - 1 for tokens that are **not masked**,
530
+ - 0 for tokens that are **masked**.
531
+
532
+ [What are attention masks?](../glossary#attention-mask)
533
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
534
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
535
+ config.max_position_embeddings - 1]`.
536
+
537
+ [What are position IDs?](../glossary#position-ids)
538
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
539
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
540
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
541
+ return_loss (`bool`, *optional*):
542
+ Whether or not to return the contrastive loss.
543
+ output_attentions (`bool`, *optional*):
544
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
545
+ tensors for more detail.
546
+ output_hidden_states (`bool`, *optional*):
547
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
548
+ more detail.
549
+ return_dict (`bool`, *optional*):
550
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
551
+ """
552
+
553
+
554
+ class BlipEncoder(nn.Module):
555
+ """
556
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
557
+ [`BlipEncoderLayer`].
558
+
559
+ Args:
560
+ config (`BlipConfig`):
561
+ The corresponding vision configuration for the `BlipEncoder`.
562
+ """
563
+
564
+ def __init__(self, config: BlipConfig):
565
+ super().__init__()
566
+ self.config = config
567
+ self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
568
+ self.gradient_checkpointing = False
569
+
570
+ def forward(
571
+ self,
572
+ inputs_embeds,
573
+ attention_mask: Optional[torch.Tensor] = None,
574
+ output_attentions: Optional[bool] = None,
575
+ output_hidden_states: Optional[bool] = None,
576
+ return_dict: Optional[bool] = None,
577
+ ) -> Union[Tuple, BaseModelOutput]:
578
+ r"""
579
+ Args:
580
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
581
+ Embedded representation of the inputs. Should be float, not int tokens.
582
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
583
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
584
+
585
+ - 1 for tokens that are **not masked**,
586
+ - 0 for tokens that are **masked**.
587
+
588
+ [What are attention masks?](../glossary#attention-mask)
589
+ output_attentions (`bool`, *optional*):
590
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
591
+ returned tensors for more detail.
592
+ output_hidden_states (`bool`, *optional*):
593
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
594
+ for more detail.
595
+ return_dict (`bool`, *optional*):
596
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
597
+ """
598
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
599
+ output_hidden_states = (
600
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
601
+ )
602
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
603
+
604
+ encoder_states = () if output_hidden_states else None
605
+ all_attentions = () if output_attentions else None
606
+
607
+ hidden_states = inputs_embeds
608
+ for idx, encoder_layer in enumerate(self.layers):
609
+ if output_hidden_states:
610
+ encoder_states = encoder_states + (hidden_states,)
611
+ if self.gradient_checkpointing and self.training:
612
+ layer_outputs = self._gradient_checkpointing_func(
613
+ encoder_layer.__call__,
614
+ hidden_states,
615
+ attention_mask,
616
+ output_attentions,
617
+ )
618
+ else:
619
+ layer_outputs = encoder_layer(
620
+ hidden_states,
621
+ attention_mask,
622
+ output_attentions=output_attentions,
623
+ )
624
+
625
+ hidden_states = layer_outputs[0]
626
+
627
+ if output_attentions:
628
+ all_attentions = all_attentions + (layer_outputs[1],)
629
+
630
+ if output_hidden_states:
631
+ encoder_states = encoder_states + (hidden_states,)
632
+
633
+ if not return_dict:
634
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
635
+ return BaseModelOutput(
636
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
637
+ )
638
+
639
+
640
+ class BlipVisionModel(BlipPreTrainedModel):
641
+ main_input_name = "pixel_values"
642
+ config_class = BlipVisionConfig
643
+
644
+ def __init__(self, config: BlipVisionConfig):
645
+ super().__init__(config)
646
+ self.config = config
647
+ embed_dim = config.hidden_size
648
+
649
+ self.embeddings = BlipVisionEmbeddings(config)
650
+ self.encoder = BlipEncoder(config)
651
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
652
+
653
+ self.post_init()
654
+
655
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
656
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig)
657
+ def forward(
658
+ self,
659
+ pixel_values: Optional[torch.FloatTensor] = None,
660
+ output_attentions: Optional[bool] = None,
661
+ output_hidden_states: Optional[bool] = None,
662
+ return_dict: Optional[bool] = None,
663
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
664
+ r"""
665
+ Returns:
666
+
667
+ """
668
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
669
+ output_hidden_states = (
670
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
671
+ )
672
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
673
+
674
+ if pixel_values is None:
675
+ raise ValueError("You have to specify pixel_values")
676
+
677
+ hidden_states = self.embeddings(pixel_values)
678
+
679
+ encoder_outputs = self.encoder(
680
+ inputs_embeds=hidden_states,
681
+ output_attentions=output_attentions,
682
+ output_hidden_states=output_hidden_states,
683
+ return_dict=return_dict,
684
+ )
685
+
686
+ last_hidden_state = encoder_outputs[0]
687
+ last_hidden_state = self.post_layernorm(last_hidden_state)
688
+
689
+ pooled_output = last_hidden_state[:, 0, :]
690
+ pooled_output = self.post_layernorm(pooled_output)
691
+
692
+ if not return_dict:
693
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
694
+
695
+ return BaseModelOutputWithPooling(
696
+ last_hidden_state=last_hidden_state,
697
+ pooler_output=pooled_output,
698
+ hidden_states=encoder_outputs.hidden_states,
699
+ attentions=encoder_outputs.attentions,
700
+ )
701
+
702
+ def get_input_embeddings(self):
703
+ return self.embeddings
704
+
705
+
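For illustration only (a hedged smoke-test sketch, not part of the diff): `BlipVisionModel` can be run on its own. The tiny configuration values below are assumptions chosen for speed and do not correspond to any pretrained checkpoint.

```python
import torch
from transformers import BlipVisionConfig
from transformers.models.blip.modeling_blip import BlipVisionModel

# Tiny assumed configuration; real checkpoints use much larger sizes.
config = BlipVisionConfig(hidden_size=64, intermediate_size=128, num_hidden_layers=2,
                          num_attention_heads=4, image_size=64, patch_size=16)
model = BlipVisionModel(config).eval()

pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 17, 64): 16 patches + [CLS]
print(outputs.pooler_output.shape)      # (1, 64): layer-normed [CLS] state
```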
706
+ @add_start_docstrings(BLIP_START_DOCSTRING)
707
+ class BlipModel(BlipPreTrainedModel):
708
+ config_class = BlipConfig
709
+
710
+ def __init__(self, config: BlipConfig):
711
+ super().__init__(config)
712
+
713
+ if not isinstance(config.text_config, BlipTextConfig):
714
+ raise ValueError(
715
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
716
+ f" {type(config.text_config)}."
717
+ )
718
+
719
+ if not isinstance(config.vision_config, BlipVisionConfig):
720
+ raise ValueError(
721
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
722
+ f" {type(config.vision_config)}."
723
+ )
724
+
725
+ text_config = config.text_config
726
+ vision_config = config.vision_config
727
+
728
+ self.projection_dim = config.projection_dim
729
+ self.text_embed_dim = text_config.hidden_size
730
+ self.vision_embed_dim = vision_config.hidden_size
731
+
732
+ self.text_model = BlipTextModel(text_config)
733
+ self.vision_model = BlipVisionModel(vision_config)
734
+
735
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
736
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
737
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
738
+
739
+ # Initialize weights and apply final processing
740
+ self.post_init()
741
+
742
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
743
+ def get_text_features(
744
+ self,
745
+ input_ids: Optional[torch.Tensor] = None,
746
+ attention_mask: Optional[torch.Tensor] = None,
747
+ position_ids: Optional[torch.Tensor] = None,
748
+ return_dict: Optional[bool] = None,
749
+ ) -> torch.FloatTensor:
750
+ r"""
751
+ Returns:
752
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
753
+ applying the projection layer to the pooled output of [`BlipTextModel`].
754
+
755
+ Examples:
756
+
757
+ ```python
758
+ >>> from transformers import AutoProcessor, BlipModel
759
+
760
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
761
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
762
+
763
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
764
+ >>> text_features = model.get_text_features(**inputs)
765
+ ```"""
766
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
767
+
768
+ text_outputs = self.text_model(
769
+ input_ids=input_ids,
770
+ attention_mask=attention_mask,
771
+ position_ids=position_ids,
772
+ return_dict=return_dict,
773
+ )
774
+
775
+ pooled_output = text_outputs[1]
776
+ text_features = self.text_projection(pooled_output)
777
+
778
+ return text_features
779
+
780
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
781
+ def get_image_features(
782
+ self,
783
+ pixel_values: Optional[torch.FloatTensor] = None,
784
+ return_dict: Optional[bool] = None,
785
+ ) -> torch.FloatTensor:
786
+ r"""
787
+ Returns:
788
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
789
+ applying the projection layer to the pooled output of [`BlipVisionModel`].
790
+
791
+ Examples:
792
+
793
+ ```python
794
+ >>> from PIL import Image
795
+ >>> import requests
796
+ >>> from transformers import AutoProcessor, BlipModel
797
+
798
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
799
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
800
+
801
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
802
+ >>> image = Image.open(requests.get(url, stream=True).raw)
803
+
804
+ >>> inputs = processor(images=image, return_tensors="pt")
805
+
806
+ >>> image_features = model.get_image_features(**inputs)
807
+ ```"""
808
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
809
+
810
+ vision_outputs = self.vision_model(pixel_values=pixel_values, return_dict=return_dict)
811
+
812
+ pooled_output = vision_outputs[1] # pooled_output
813
+ image_features = self.visual_projection(pooled_output)
814
+
815
+ return image_features
816
+
817
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
818
+ @replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig)
819
+ def forward(
820
+ self,
821
+ input_ids: Optional[torch.LongTensor] = None,
822
+ pixel_values: Optional[torch.FloatTensor] = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ position_ids: Optional[torch.LongTensor] = None,
825
+ return_loss: Optional[bool] = None,
826
+ output_attentions: Optional[bool] = None,
827
+ output_hidden_states: Optional[bool] = None,
828
+ return_dict: Optional[bool] = None,
829
+ ) -> Union[Tuple, BlipOutput]:
830
+ r"""
831
+ Returns:
832
+
833
+ Examples:
834
+
835
+ ```python
836
+ >>> from PIL import Image
837
+ >>> import requests
838
+ >>> from transformers import AutoProcessor, BlipModel
839
+
840
+ >>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
841
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
842
+
843
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
844
+ >>> image = Image.open(requests.get(url, stream=True).raw)
845
+
846
+ >>> inputs = processor(
847
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
848
+ ... )
849
+
850
+ >>> outputs = model(**inputs)
851
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
852
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
853
+ ```"""
854
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
860
+
861
+ vision_outputs = self.vision_model(
862
+ pixel_values=pixel_values,
863
+ output_attentions=output_attentions,
864
+ output_hidden_states=output_hidden_states,
865
+ return_dict=return_dict,
866
+ )
867
+
868
+ text_outputs = self.text_model(
869
+ input_ids=input_ids,
870
+ attention_mask=attention_mask,
871
+ position_ids=position_ids,
872
+ output_attentions=output_attentions,
873
+ output_hidden_states=output_hidden_states,
874
+ return_dict=return_dict,
875
+ )
876
+
877
+ image_embeds = vision_outputs[1]
878
+ image_embeds = self.visual_projection(image_embeds)
879
+
880
+ text_embeds = text_outputs[1]
881
+ text_embeds = self.text_projection(text_embeds)
882
+
883
+ # normalized features
884
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
885
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
886
+
887
+ # cosine similarity as logits
888
+ logit_scale = self.logit_scale.exp()
889
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
890
+ logits_per_image = logits_per_text.t()
891
+
892
+ loss = None
893
+ if return_loss:
894
+ loss = blip_loss(logits_per_text)
895
+
896
+ if not return_dict:
897
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
898
+ return ((loss,) + output) if loss is not None else output
899
+
900
+ return BlipOutput(
901
+ loss=loss,
902
+ logits_per_image=logits_per_image,
903
+ logits_per_text=logits_per_text,
904
+ text_embeds=text_embeds,
905
+ image_embeds=image_embeds,
906
+ text_model_output=text_outputs,
907
+ vision_model_output=vision_outputs,
908
+ )
909
+
910
+
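For illustration only (sketch; the checkpoint name and image URL are taken from the docstrings above): `logits_per_image` is just the temperature-scaled cosine similarity between the projected, L2-normalized text and image embeddings, so it can be reproduced from the two feature helpers. The projection heads may be newly initialized when this checkpoint is loaded into `BlipModel`, so treat this purely as an API illustration.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipModel

model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image,
                   return_tensors="pt", padding=True)

with torch.no_grad():
    text_embeds = model.get_text_features(input_ids=inputs.input_ids,
                                          attention_mask=inputs.attention_mask)
    image_embeds = model.get_image_features(pixel_values=inputs.pixel_values)

    # Same normalization and temperature scaling as in BlipModel.forward.
    text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
    image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
    logits_per_image = model.logit_scale.exp() * image_embeds @ text_embeds.t()

print(logits_per_image.softmax(dim=1))
```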
911
+ @add_start_docstrings(
912
+ """
913
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
914
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. If no
915
+ text input is provided, the decoder starts generating the caption from the [BOS] (beginning-of-sequence)
916
+ token alone.
917
+ """,
918
+ BLIP_START_DOCSTRING,
919
+ )
920
+ class BlipForConditionalGeneration(BlipPreTrainedModel):
921
+ config_class = BlipConfig
922
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
923
+ main_input_name = "pixel_values"
924
+
925
+ def __init__(self, config: BlipConfig):
926
+ super().__init__(config)
927
+
928
+ self.vision_model = BlipVisionModel(config.vision_config)
929
+
930
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
931
+
932
+ self.decoder_input_ids = config.text_config.bos_token_id
933
+ self.decoder_pad_token_id = config.text_config.pad_token_id
934
+
935
+ # Initialize weights and apply final processing
936
+ self.post_init()
937
+
938
+ def get_input_embeddings(self) -> nn.Module:
939
+ return self.vision_model.embeddings.patch_embedding
940
+
941
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
942
+ @replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig)
943
+ def forward(
944
+ self,
945
+ pixel_values: torch.FloatTensor,
946
+ input_ids: Optional[torch.LongTensor] = None,
947
+ attention_mask: Optional[torch.LongTensor] = None,
948
+ output_attentions: Optional[bool] = None,
949
+ output_hidden_states: Optional[bool] = None,
950
+ labels: Optional[torch.LongTensor] = None,
951
+ return_dict: Optional[bool] = None,
952
+ ) -> Union[Tuple, BlipForConditionalGenerationModelOutput]:
953
+ r"""
954
+ Returns:
955
+
956
+ Examples:
957
+
958
+ ```python
959
+ >>> from PIL import Image
960
+ >>> import requests
961
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
962
+
963
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
964
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
965
+
966
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
967
+ >>> image = Image.open(requests.get(url, stream=True).raw)
968
+ >>> text = "A picture of"
969
+
970
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
971
+
972
+ >>> outputs = model(**inputs)
973
+ ```"""
974
+
975
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
976
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
977
+ output_hidden_states = (
978
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
979
+ )
980
+
981
+ vision_outputs = self.vision_model(
982
+ pixel_values=pixel_values,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+
988
+ image_embeds = vision_outputs[0]
989
+
990
+ outputs = self.text_decoder(
991
+ input_ids=input_ids,
992
+ attention_mask=attention_mask,
993
+ encoder_hidden_states=image_embeds,
994
+ labels=labels,
995
+ return_dict=return_dict,
996
+ reduction="mean",
997
+ )
998
+
999
+ if not return_dict:
1000
+ outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
1001
+ return tuple(output for output in outputs if output is not None)
1002
+
1003
+ return BlipForConditionalGenerationModelOutput(
1004
+ loss=outputs.loss,
1005
+ logits=outputs.logits,
1006
+ image_embeds=image_embeds,
1007
+ last_hidden_state=vision_outputs.last_hidden_state,
1008
+ hidden_states=vision_outputs.hidden_states,
1009
+ attentions=vision_outputs.attentions,
1010
+ )
1011
+
1012
+ @torch.no_grad()
1013
+ def generate(
1014
+ self,
1015
+ pixel_values: torch.FloatTensor,
1016
+ input_ids: Optional[torch.LongTensor] = None,
1017
+ attention_mask: Optional[torch.LongTensor] = None,
1018
+ **generate_kwargs,
1019
+ ) -> torch.LongTensor:
1020
+ r"""
1021
+ Overrides the *generate* function to enable using the model as a conditional generator.
1022
+
1023
+ Parameters:
1024
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
1025
+ Input image to be processed
1026
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1027
+ The sequence used as a prompt for the generation.
1028
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1029
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: `1` for tokens that are **not masked**, `0` for tokens that are **masked**.
1030
+
1031
+
1032
+ Examples:
1033
+ ```python
1034
+ >>> from PIL import Image
1035
+ >>> import requests
1036
+ >>> from transformers import AutoProcessor, BlipForConditionalGeneration
1037
+
1038
+ >>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1039
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1040
+
1041
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1042
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1043
+
1044
+ >>> inputs = processor(images=image, return_tensors="pt")
1045
+
1046
+ >>> outputs = model.generate(**inputs)
1047
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1048
+ two cats sleeping on a couch
1049
+ ```
1050
+ """
1051
+
1052
+ batch_size = pixel_values.shape[0]
1053
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1054
+
1055
+ image_embeds = vision_outputs[0]
1056
+
1057
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
1058
+
1059
+ if isinstance(input_ids, list):
1060
+ input_ids = torch.LongTensor(input_ids)
1061
+ elif input_ids is None:
1062
+ input_ids = (
1063
+ torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]])
1064
+ .repeat(batch_size, 1)
1065
+ .to(image_embeds.device)
1066
+ )
1067
+
1068
+ input_ids[:, 0] = self.config.text_config.bos_token_id
1069
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
1070
+
1071
+ outputs = self.text_decoder.generate(
1072
+ input_ids=input_ids[:, :-1],
1073
+ eos_token_id=self.config.text_config.sep_token_id,
1074
+ pad_token_id=self.config.text_config.pad_token_id,
1075
+ attention_mask=attention_mask,
1076
+ encoder_hidden_states=image_embeds,
1077
+ encoder_attention_mask=image_attention_mask,
1078
+ **generate_kwargs,
1079
+ )
1080
+
1081
+ return outputs
1082
+
1083
+
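For illustration only (sketch; checkpoint and image URL taken from the docstrings above, the exact captions will vary): the `generate` override seeds the decoder with the [BOS] token when no `input_ids` are supplied, so the same model handles both free-form captioning and prompt continuation.

```python
import requests
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration

processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Unconditional captioning: the decoder starts from [BOS] only.
free_caption = model.generate(**processor(images=image, return_tensors="pt"))

# Prompted captioning: the decoder continues the given text prompt.
prompted_caption = model.generate(**processor(images=image, text="a picture of", return_tensors="pt"))

print(processor.decode(free_caption[0], skip_special_tokens=True))
print(processor.decode(prompted_caption[0], skip_special_tokens=True))
```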
1084
+ @add_start_docstrings(
1085
+ """
1086
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
1087
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
1088
+ with the encoding of the image, and the text decoder will output the answer to the question.
1089
+ """,
1090
+ BLIP_START_DOCSTRING,
1091
+ )
1092
+ class BlipForQuestionAnswering(BlipPreTrainedModel):
1093
+ config_class = BlipConfig
1094
+ _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"]
1095
+
1096
+ def __init__(self, config: BlipConfig):
1097
+ super().__init__(config)
1098
+
1099
+ self.vision_model = BlipVisionModel(config.vision_config)
1100
+
1101
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
1102
+
1103
+ self.text_decoder = BlipTextLMHeadModel(config.text_config)
1104
+
1105
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1106
+ self.decoder_start_token_id = config.text_config.bos_token_id
1107
+
1108
+ # Initialize weights and apply final processing
1109
+ self.post_init()
1110
+
1111
+ def get_input_embeddings(self) -> nn.Module:
1112
+ return self.vision_model.embeddings.patch_embedding
1113
+
1114
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1115
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
1116
+ def forward(
1117
+ self,
1118
+ input_ids: torch.LongTensor,
1119
+ pixel_values: torch.FloatTensor,
1120
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1121
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1122
+ attention_mask: Optional[torch.LongTensor] = None,
1123
+ output_attentions: Optional[bool] = None,
1124
+ output_hidden_states: Optional[bool] = None,
1125
+ labels: Optional[torch.LongTensor] = None,
1126
+ return_dict: Optional[bool] = None,
1127
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
1128
+ r"""
1129
+ Returns:
1130
+
1131
+ Examples:
1132
+
1133
+ ```python
1134
+ >>> from PIL import Image
1135
+ >>> import requests
1136
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
1137
+
1138
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1139
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1140
+
1141
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1142
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1143
+
1144
+ >>> # training
1145
+ >>> text = "How many cats are in the picture?"
1146
+ >>> label = "2"
1147
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1148
+ >>> labels = processor(text=label, return_tensors="pt").input_ids
1149
+
1150
+ >>> inputs["labels"] = labels
1151
+ >>> outputs = model(**inputs)
1152
+ >>> loss = outputs.loss
1153
+ >>> loss.backward()
1154
+
1155
+ >>> # inference
1156
+ >>> text = "How many cats are in the picture?"
1157
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1158
+ >>> outputs = model.generate(**inputs)
1159
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1160
+ 2
1161
+ ```"""
1162
+ if labels is None and decoder_input_ids is None:
1163
+ raise ValueError(
1164
+ "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with"
1165
+ " `BlipForQuestionAnswering`. If you are training the model, make sure that `labels` is passed; if you"
1166
+ " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`"
1167
+ )
1168
+
1169
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1170
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1171
+ output_hidden_states = (
1172
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1173
+ )
1174
+
1175
+ vision_outputs = self.vision_model(
1176
+ pixel_values=pixel_values,
1177
+ output_attentions=output_attentions,
1178
+ output_hidden_states=output_hidden_states,
1179
+ return_dict=return_dict,
1180
+ )
1181
+
1182
+ image_embeds = vision_outputs[0]
1183
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
1184
+
1185
+ question_embeds = self.text_encoder(
1186
+ input_ids=input_ids,
1187
+ attention_mask=attention_mask,
1188
+ encoder_hidden_states=image_embeds,
1189
+ encoder_attention_mask=image_attention_mask,
1190
+ return_dict=return_dict,
1191
+ )
1192
+
1193
+ if labels is not None and decoder_input_ids is None:
1194
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
1195
+ decoder_input_ids = labels
1196
+
1197
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1198
+
1199
+ answer_output = self.text_decoder(
1200
+ input_ids=decoder_input_ids,
1201
+ attention_mask=decoder_attention_mask,
1202
+ encoder_hidden_states=question_embeds,
1203
+ encoder_attention_mask=attention_mask,
1204
+ labels=labels,
1205
+ return_dict=return_dict,
1206
+ reduction="mean",
1207
+ )
1208
+
1209
+ if labels is not None:
1210
+ decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean()
1211
+ else:
1212
+ decoder_loss = None
1213
+
1214
+ if not return_dict:
1215
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
1216
+ return tuple(output for output in outputs if output is not None)
1217
+
1218
+ return BlipTextVisionModelOutput(
1219
+ loss=decoder_loss,
1220
+ image_embeds=image_embeds,
1221
+ last_hidden_state=vision_outputs.last_hidden_state,
1222
+ hidden_states=vision_outputs.hidden_states,
1223
+ attentions=vision_outputs.attentions,
1224
+ )
1225
+
1226
+ @torch.no_grad()
1227
+ def generate(
1228
+ self,
1229
+ input_ids: torch.LongTensor,
1230
+ pixel_values: torch.FloatTensor,
1231
+ attention_mask: Optional[torch.LongTensor] = None,
1232
+ **generate_kwargs,
1233
+ ) -> torch.LongTensor:
1234
+ r"""
1235
+ Overrides the *generate* function to enable using the model as a conditional generator.
1236
+
1237
+ Parameters:
1238
+ input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
1239
+ The sequence used as a prompt for the generation.
1240
+ pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*):
1241
+ Input image to be processed
1242
+ attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
1243
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
1244
+ tokens that are NOT MASKED, `0` for MASKED tokens.
1245
+ **generate_kwargs:
1246
+ Additional arguments passed to the *generate* function of the decoder
1247
+
1248
+
1249
+ Examples:
1250
+ ```python
1251
+ >>> from PIL import Image
1252
+ >>> import requests
1253
+ >>> from transformers import AutoProcessor, BlipForQuestionAnswering
1254
+
1255
+ >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1256
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1257
+
1258
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1259
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1260
+ >>> text = "How many cats are in the picture?"
1261
+
1262
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1263
+
1264
+ >>> outputs = model.generate(**inputs)
1265
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1266
+ 2
1267
+ ```
1268
+ """
1269
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1270
+
1271
+ image_embeds = vision_outputs[0]
1272
+
1273
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
1274
+
1275
+ if isinstance(input_ids, list):
1276
+ input_ids = torch.LongTensor(input_ids)
1277
+
1278
+ question_outputs = self.text_encoder(
1279
+ input_ids=input_ids,
1280
+ attention_mask=attention_mask,
1281
+ encoder_hidden_states=image_embeds,
1282
+ encoder_attention_mask=image_attention_mask,
1283
+ return_dict=False,
1284
+ )
1285
+
1286
+ question_embeds = question_outputs[0]
1287
+
1288
+ question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device)
1289
+
1290
+ bos_ids = torch.full(
1291
+ (question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device
1292
+ )
1293
+
1294
+ outputs = self.text_decoder.generate(
1295
+ input_ids=bos_ids,
1296
+ eos_token_id=self.config.text_config.sep_token_id,
1297
+ pad_token_id=self.config.text_config.pad_token_id,
1298
+ encoder_hidden_states=question_embeds,
1299
+ encoder_attention_mask=question_attention_mask,
1300
+ **generate_kwargs,
1301
+ )
1302
+
1303
+ return outputs
1304
+
1305
+
1306
+ @add_start_docstrings(
1307
+ """
1308
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
1309
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
1310
+ the image.
1311
+ """,
1312
+ BLIP_START_DOCSTRING,
1313
+ )
1314
+ class BlipForImageTextRetrieval(BlipPreTrainedModel):
1315
+ config_class = BlipConfig
1316
+
1317
+ def __init__(self, config: BlipConfig):
1318
+ super().__init__(config)
1319
+
1320
+ self.vision_model = BlipVisionModel(config.vision_config)
1321
+
1322
+ self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
1323
+
1324
+ # vision projection layer
1325
+ self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size)
1326
+
1327
+ # text projection layer
1328
+ self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size)
1329
+
1330
+ # image text matching head
1331
+ self.itm_head = nn.Linear(config.text_config.hidden_size, 2)
1332
+
1333
+ self.decoder_pad_token_id = (
1334
+ config.text_config.pad_token_id
1335
+ if not hasattr(config, "decoder_pad_token_id")
1336
+ else config.decoder_pad_token_id
1337
+ )
1338
+ self.decoder_start_token_id = (
1339
+ config.text_config.bos_token_id
1340
+ if not hasattr(config, "decoder_start_token_id")
1341
+ else config.decoder_start_token_id
1342
+ )
1343
+
1344
+ # Initialize weights and apply final processing
1345
+ self.post_init()
1346
+
1347
+ def get_input_embeddings(self) -> nn.Module:
1348
+ return self.vision_model.embeddings.patch_embedding
1349
+
1350
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1351
+ @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
1352
+ def forward(
1353
+ self,
1354
+ input_ids: torch.LongTensor,
1355
+ pixel_values: torch.FloatTensor,
1356
+ use_itm_head: Optional[bool] = True,
1357
+ attention_mask: Optional[torch.LongTensor] = None,
1358
+ output_attentions: Optional[bool] = None,
1359
+ output_hidden_states: Optional[bool] = None,
1360
+ return_dict: Optional[bool] = None,
1361
+ ) -> Union[Tuple, BlipTextVisionModelOutput]:
1362
+ r"""
1363
+ Returns:
1364
+
1365
+ Examples:
1366
+
1367
+ ```python
1368
+ >>> from PIL import Image
1369
+ >>> import requests
1370
+ >>> from transformers import AutoProcessor, BlipForImageTextRetrieval
1371
+
1372
+ >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
1373
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
1374
+
1375
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1376
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1377
+ >>> text = "an image of a cat"
1378
+
1379
+ >>> inputs = processor(images=image, text=text, return_tensors="pt")
1380
+ >>> outputs = model(**inputs)
1381
+ ```
1382
+ """
1383
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1384
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1385
+ output_hidden_states = (
1386
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1387
+ )
1388
+
1389
+ vision_outputs = self.vision_model(
1390
+ pixel_values=pixel_values,
1391
+ output_attentions=output_attentions,
1392
+ output_hidden_states=output_hidden_states,
1393
+ return_dict=return_dict,
1394
+ )
1395
+
1396
+ image_embeds = vision_outputs[0]
1397
+ image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)
1398
+
1399
+ if use_itm_head:
1400
+ question_embeds = self.text_encoder(
1401
+ input_ids=input_ids,
1402
+ attention_mask=attention_mask,
1403
+ encoder_hidden_states=image_embeds,
1404
+ encoder_attention_mask=image_atts,
1405
+ return_dict=return_dict,
1406
+ )
1407
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1408
+
1409
+ output = self.itm_head(question_embeds[:, 0, :])
1410
+ else:
1411
+ question_embeds = self.text_encoder(
1412
+ input_ids=input_ids,
1413
+ attention_mask=attention_mask,
1414
+ return_dict=return_dict,
1415
+ )
1416
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1417
+
1418
+ image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
1419
+ text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1)
1420
+
1421
+ output = image_feat @ text_feat.t()
1422
+
1423
+ if not return_dict:
1424
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
1425
+ return tuple(output for output in outputs if output is not None)
1426
+
1427
+ return BlipImageTextMatchingModelOutput(
1428
+ itm_score=output,
1429
+ last_hidden_state=vision_outputs.last_hidden_state,
1430
+ hidden_states=vision_outputs.hidden_states,
1431
+ attentions=vision_outputs.attentions,
1432
+ question_embeds=question_embeds,
1433
+ )
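A minimal usage sketch (not part of the committed file above) showing how the two scoring modes of `BlipForImageTextRetrieval` might be read out. The checkpoint name follows the docstring example; treating logit index 1 of the 2-way `itm_head` output as the "match" class is an assumption, not something stated in this file.
```python
# Hedged sketch: reading BlipForImageTextRetrieval scores in both modes.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, BlipForImageTextRetrieval

processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="an image of a cat", return_tensors="pt")

with torch.no_grad():
    # use_itm_head=True: 2-way classification logits from the image-text matching head
    itm_logits = model(**inputs, use_itm_head=True).itm_score
    match_probability = torch.softmax(itm_logits, dim=-1)[:, 1]  # assumes index 1 = "match"

    # use_itm_head=False: cosine similarity between the projected [CLS] features
    cosine_score = model(**inputs, use_itm_head=False).itm_score
```
With `use_itm_head=False`, the returned score is the dot product of the L2-normalized projected [CLS] features, i.e. a cosine similarity, as computed in the forward pass above.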
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_blip_text.py ADDED
@@ -0,0 +1,945 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the BSD-3-clause license (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # https://opensource.org/licenses/BSD-3-Clause
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import Tensor, device, nn
23
+ from torch.nn import CrossEntropyLoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ CausalLMOutputWithCrossAttentions,
30
+ )
31
+ from ...modeling_utils import (
32
+ PreTrainedModel,
33
+ apply_chunking_to_forward,
34
+ find_pruneable_heads_and_indices,
35
+ prune_linear_layer,
36
+ )
37
+ from ...utils import logging
38
+ from .configuration_blip import BlipTextConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L52
45
+ class BlipTextEmbeddings(nn.Module):
46
+ """Construct the embeddings from word and position embeddings."""
47
+
48
+ def __init__(self, config):
49
+ super().__init__()
50
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
51
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
52
+
53
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
54
+ # any TensorFlow checkpoint file
55
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
56
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
57
+
58
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
59
+ self.register_buffer(
60
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
61
+ )
62
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
63
+
64
+ self.config = config
65
+
66
+ def forward(
67
+ self,
68
+ input_ids: Optional[torch.LongTensor] = None,
69
+ position_ids: Optional[torch.LongTensor] = None,
70
+ inputs_embeds: Optional[torch.FloatTensor] = None,
71
+ past_key_values_length: int = 0,
72
+ ) -> torch.Tensor:
73
+ if input_ids is not None:
74
+ input_shape = input_ids.size()
75
+ else:
76
+ input_shape = inputs_embeds.size()[:-1]
77
+
78
+ seq_length = input_shape[1]
79
+
80
+ if position_ids is None:
81
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
82
+
83
+ if inputs_embeds is None:
84
+ input_ids = input_ids.to(self.word_embeddings.weight.device)
85
+ inputs_embeds = self.word_embeddings(input_ids)
86
+
87
+ embeddings = inputs_embeds
88
+
89
+ if self.position_embedding_type == "absolute":
90
+ position_embeddings = self.position_embeddings(position_ids)
91
+ embeddings += position_embeddings
92
+ embeddings = self.LayerNorm(embeddings)
93
+ embeddings = self.dropout(embeddings)
94
+ return embeddings
95
+
96
+
97
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L97
98
+ class BlipTextSelfAttention(nn.Module):
99
+ def __init__(self, config, is_cross_attention):
100
+ super().__init__()
101
+ self.config = config
102
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
103
+ raise ValueError(
104
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
105
+ % (config.hidden_size, config.num_attention_heads)
106
+ )
107
+
108
+ self.num_attention_heads = config.num_attention_heads
109
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
110
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
111
+
112
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
113
+ if is_cross_attention:
114
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
115
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
116
+ else:
117
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
118
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
119
+
120
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
121
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
122
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
123
+ self.max_position_embeddings = config.max_position_embeddings
124
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
125
+
126
+ def save_attn_gradients(self, attn_gradients):
127
+ self.attn_gradients = attn_gradients
128
+
129
+ def get_attn_gradients(self):
130
+ return self.attn_gradients
131
+
132
+ def save_attention_map(self, attention_map):
133
+ self.attention_map = attention_map
134
+
135
+ def get_attention_map(self):
136
+ return self.attention_map
137
+
138
+ def transpose_for_scores(self, x):
139
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
140
+ x = x.view(*new_x_shape)
141
+ return x.permute(0, 2, 1, 3)
142
+
143
+ def forward(
144
+ self,
145
+ hidden_states: torch.Tensor,
146
+ attention_mask: Optional[torch.FloatTensor] = None,
147
+ head_mask: Optional[torch.FloatTensor] = None,
148
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
149
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
150
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
151
+ output_attentions: Optional[bool] = False,
152
+ ) -> Tuple[torch.Tensor]:
153
+ mixed_query_layer = self.query(hidden_states)
154
+
155
+ # If this is instantiated as a cross-attention module, the keys
156
+ # and values come from an encoder; the attention mask needs to be
157
+ # such that the encoder's padding tokens are not attended to.
158
+ is_cross_attention = encoder_hidden_states is not None
159
+
160
+ if is_cross_attention:
161
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
162
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
163
+ attention_mask = encoder_attention_mask
164
+ elif past_key_value is not None:
165
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
166
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
167
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
168
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
169
+ else:
170
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
171
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
172
+
173
+ query_layer = self.transpose_for_scores(mixed_query_layer)
174
+
175
+ past_key_value = (key_layer, value_layer)
176
+
177
+ # Take the dot product between "query" and "key" to get the raw attention scores.
178
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
179
+
180
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
181
+ seq_length = hidden_states.size()[1]
182
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
183
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
184
+ distance = position_ids_l - position_ids_r
185
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
186
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
187
+
188
+ if self.position_embedding_type == "relative_key":
189
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
190
+ attention_scores = attention_scores + relative_position_scores
191
+ elif self.position_embedding_type == "relative_key_query":
192
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
193
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
194
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
195
+
196
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
197
+ if attention_mask is not None:
198
+ # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function)
199
+ attention_scores = attention_scores + attention_mask.to(attention_scores.device)
200
+
201
+ # Normalize the attention scores to probabilities.
202
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
203
+
204
+ # This is actually dropping out entire tokens to attend to, which might
205
+ # seem a bit unusual, but is taken from the original Transformer paper.
206
+ attention_probs_dropped = self.dropout(attention_probs)
207
+
208
+ # Mask heads if we want to
209
+ if head_mask is not None:
210
+ attention_probs_dropped = attention_probs_dropped * head_mask
211
+
212
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
213
+
214
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
215
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
216
+ context_layer = context_layer.view(*new_context_layer_shape)
217
+
218
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
219
+
220
+ outputs = outputs + (past_key_value,)
221
+ return outputs
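A toy, shape-only sketch (invented sizes, plain PyTorch, not part of this file) of the key/value cache concatenation performed above when `past_key_value` is provided:
```python
# Toy shape check of the cache concatenation in BlipTextSelfAttention.forward;
# batch size, head count and head dimension below are invented for illustration.
import torch

batch, heads, head_dim = 2, 12, 64
cached_len, new_len = 7, 1  # one new token appended to a 7-token cache

past_key = torch.randn(batch, heads, cached_len, head_dim)
new_key = torch.randn(batch, heads, new_len, head_dim)

key_layer = torch.cat([past_key, new_key], dim=2)  # sequence axis is dim=2
assert key_layer.shape == (batch, heads, cached_len + new_len, head_dim)
```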
222
+
223
+
224
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert -> BlipText
225
+ class BlipTextSelfOutput(nn.Module):
226
+ def __init__(self, config):
227
+ super().__init__()
228
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
229
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
230
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
231
+
232
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
233
+ hidden_states = self.dense(hidden_states)
234
+ hidden_states = self.dropout(hidden_states)
235
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
236
+ return hidden_states
237
+
238
+
239
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242
240
+ class BlipTextAttention(nn.Module):
241
+ def __init__(self, config, is_cross_attention=False):
242
+ super().__init__()
243
+ self.self = BlipTextSelfAttention(config, is_cross_attention)
244
+ self.output = BlipTextSelfOutput(config)
245
+ self.pruned_heads = set()
246
+
247
+ def prune_heads(self, heads):
248
+ if len(heads) == 0:
249
+ return
250
+ heads, index = find_pruneable_heads_and_indices(
251
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
252
+ )
253
+
254
+ # Prune linear layers
255
+ self.self.query = prune_linear_layer(self.self.query, index)
256
+ self.self.key = prune_linear_layer(self.self.key, index)
257
+ self.self.value = prune_linear_layer(self.self.value, index)
258
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
259
+
260
+ # Update hyper params and store pruned heads
261
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
262
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
263
+ self.pruned_heads = self.pruned_heads.union(heads)
264
+
265
+ def forward(
266
+ self,
267
+ hidden_states: torch.Tensor,
268
+ attention_mask: Optional[torch.FloatTensor] = None,
269
+ head_mask: Optional[torch.FloatTensor] = None,
270
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
271
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
272
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
273
+ output_attentions: Optional[bool] = False,
274
+ ) -> Tuple[torch.Tensor]:
275
+ self_outputs = self.self(
276
+ hidden_states,
277
+ attention_mask,
278
+ head_mask,
279
+ encoder_hidden_states,
280
+ encoder_attention_mask,
281
+ past_key_value,
282
+ output_attentions,
283
+ )
284
+ attention_output = self.output(self_outputs[0], hidden_states)
285
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
286
+ return outputs
287
+
288
+
289
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert -> BlipText
290
+ class BlipTextIntermediate(nn.Module):
291
+ def __init__(self, config):
292
+ super().__init__()
293
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
294
+ if isinstance(config.hidden_act, str):
295
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
296
+ else:
297
+ self.intermediate_act_fn = config.hidden_act
298
+
299
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
300
+ hidden_states = self.dense(hidden_states)
301
+ hidden_states = self.intermediate_act_fn(hidden_states)
302
+ return hidden_states
303
+
304
+
305
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert -> BlipText
306
+ class BlipTextOutput(nn.Module):
307
+ def __init__(self, config):
308
+ super().__init__()
309
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
310
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
311
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
312
+
313
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
314
+ hidden_states = self.dense(hidden_states)
315
+ hidden_states = self.dropout(hidden_states)
316
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
317
+ return hidden_states
318
+
319
+
320
+ class BlipTextLayer(nn.Module):
321
+ def __init__(self, config, layer_num):
322
+ super().__init__()
323
+ self.config = config
324
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
325
+ self.seq_len_dim = 1
326
+ self.attention = BlipTextAttention(config)
327
+ self.layer_num = layer_num
328
+ if self.config.is_decoder:
329
+ self.crossattention = BlipTextAttention(config, is_cross_attention=self.config.is_decoder)
330
+ self.intermediate = BlipTextIntermediate(config)
331
+ self.output = BlipTextOutput(config)
332
+
333
+ def forward(
334
+ self,
335
+ hidden_states: torch.Tensor,
336
+ attention_mask: Optional[torch.FloatTensor] = None,
337
+ head_mask: Optional[torch.FloatTensor] = None,
338
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
339
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
340
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
341
+ output_attentions: Optional[bool] = False,
342
+ ) -> Tuple[torch.Tensor]:
343
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
344
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
345
+ self_attention_outputs = self.attention(
346
+ hidden_states,
347
+ attention_mask,
348
+ head_mask,
349
+ output_attentions=output_attentions,
350
+ past_key_value=self_attn_past_key_value,
351
+ )
352
+ attention_output = self_attention_outputs[0]
353
+
354
+ outputs = self_attention_outputs[1:-1]
355
+ present_key_value = self_attention_outputs[-1]
356
+
357
+ if encoder_hidden_states is not None:
358
+ cross_attention_outputs = self.crossattention(
359
+ attention_output,
360
+ attention_mask,
361
+ head_mask,
362
+ encoder_hidden_states,
363
+ encoder_attention_mask,
364
+ output_attentions=output_attentions,
365
+ )
366
+ attention_output = cross_attention_outputs[0]
367
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
368
+ layer_output = apply_chunking_to_forward(
369
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
370
+ )
371
+ outputs = (layer_output,) + outputs
372
+
373
+ outputs = outputs + (present_key_value,)
374
+
375
+ return outputs
376
+
377
+ def feed_forward_chunk(self, attention_output):
378
+ intermediate_output = self.intermediate(attention_output)
379
+ layer_output = self.output(intermediate_output, attention_output)
380
+ return layer_output
381
+
382
+
383
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L386
384
+ class BlipTextEncoder(nn.Module):
385
+ def __init__(self, config):
386
+ super().__init__()
387
+ self.config = config
388
+ self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)])
389
+ self.gradient_checkpointing = False
390
+
391
+ def forward(
392
+ self,
393
+ hidden_states: torch.Tensor,
394
+ attention_mask: Optional[torch.FloatTensor] = None,
395
+ head_mask: Optional[torch.FloatTensor] = None,
396
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
397
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
398
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
399
+ use_cache: Optional[bool] = None,
400
+ output_attentions: Optional[bool] = False,
401
+ output_hidden_states: Optional[bool] = False,
402
+ return_dict: Optional[bool] = True,
403
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
404
+ if self.gradient_checkpointing and self.training:
405
+ if use_cache:
406
+ logger.warning(
407
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
408
+ )
409
+ use_cache = False
410
+ all_hidden_states = () if output_hidden_states else None
411
+ all_self_attentions = () if output_attentions else None
412
+ all_cross_attentions = () if output_attentions and self.config.is_decoder else None
413
+
414
+ next_decoder_cache = () if use_cache else None
415
+
416
+ for i in range(self.config.num_hidden_layers):
417
+ layer_module = self.layer[i]
418
+ if output_hidden_states:
419
+ all_hidden_states = all_hidden_states + (hidden_states,)
420
+
421
+ layer_head_mask = head_mask[i] if head_mask is not None else None
422
+ past_key_value = past_key_values[i] if past_key_values is not None else None
423
+
424
+ if self.gradient_checkpointing and self.training:
425
+ layer_outputs = self._gradient_checkpointing_func(
426
+ layer_module.__call__,
427
+ hidden_states,
428
+ attention_mask,
429
+ layer_head_mask,
430
+ encoder_hidden_states,
431
+ encoder_attention_mask,
432
+ past_key_value,
433
+ output_attentions,
434
+ )
435
+ else:
436
+ layer_outputs = layer_module(
437
+ hidden_states,
438
+ attention_mask,
439
+ layer_head_mask,
440
+ encoder_hidden_states,
441
+ encoder_attention_mask,
442
+ past_key_value,
443
+ output_attentions,
444
+ )
445
+
446
+ hidden_states = layer_outputs[0]
447
+ if use_cache:
448
+ next_decoder_cache += (layer_outputs[-1],)
449
+ if output_attentions:
450
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
451
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
452
+
453
+ if output_hidden_states:
454
+ all_hidden_states = all_hidden_states + (hidden_states,)
455
+
456
+ if not return_dict:
457
+ return tuple(
458
+ v
459
+ for v in [
460
+ hidden_states,
461
+ next_decoder_cache,
462
+ all_hidden_states,
463
+ all_self_attentions,
464
+ all_cross_attentions,
465
+ ]
466
+ if v is not None
467
+ )
468
+ return BaseModelOutputWithPastAndCrossAttentions(
469
+ last_hidden_state=hidden_states,
470
+ past_key_values=next_decoder_cache,
471
+ hidden_states=all_hidden_states,
472
+ attentions=all_self_attentions,
473
+ cross_attentions=all_cross_attentions,
474
+ )
475
+
476
+
477
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BlipText
478
+ class BlipTextPooler(nn.Module):
479
+ def __init__(self, config):
480
+ super().__init__()
481
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
482
+ self.activation = nn.Tanh()
483
+
484
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
485
+ # We "pool" the model by simply taking the hidden state corresponding
486
+ # to the first token.
487
+ first_token_tensor = hidden_states[:, 0]
488
+ pooled_output = self.dense(first_token_tensor)
489
+ pooled_output = self.activation(pooled_output)
490
+ return pooled_output
491
+
492
+
493
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BlipText
494
+ class BlipTextPredictionHeadTransform(nn.Module):
495
+ def __init__(self, config):
496
+ super().__init__()
497
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
498
+ if isinstance(config.hidden_act, str):
499
+ self.transform_act_fn = ACT2FN[config.hidden_act]
500
+ else:
501
+ self.transform_act_fn = config.hidden_act
502
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
503
+
504
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
505
+ hidden_states = self.dense(hidden_states)
506
+ hidden_states = self.transform_act_fn(hidden_states)
507
+ hidden_states = self.LayerNorm(hidden_states)
508
+ return hidden_states
509
+
510
+
511
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BlipText
512
+ class BlipTextLMPredictionHead(nn.Module):
513
+ def __init__(self, config):
514
+ super().__init__()
515
+ self.transform = BlipTextPredictionHeadTransform(config)
516
+
517
+ # The output weights are the same as the input embeddings, but there is
518
+ # an output-only bias for each token.
519
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
520
+
521
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
522
+
523
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
524
+ self.decoder.bias = self.bias
525
+
526
+ def forward(self, hidden_states):
527
+ hidden_states = self.transform(hidden_states)
528
+ hidden_states = self.decoder(hidden_states)
529
+ return hidden_states
530
+
531
+
532
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BlipText
533
+ class BlipTextOnlyMLMHead(nn.Module):
534
+ def __init__(self, config):
535
+ super().__init__()
536
+ self.predictions = BlipTextLMPredictionHead(config)
537
+
538
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
539
+ prediction_scores = self.predictions(sequence_output)
540
+ return prediction_scores
541
+
542
+
543
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L548
544
+ class BlipTextPreTrainedModel(PreTrainedModel):
545
+ """
546
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
547
+ models.
548
+ """
549
+
550
+ config_class = BlipTextConfig
551
+ base_model_prefix = "bert"
552
+
553
+ def _init_weights(self, module):
554
+ """Initialize the weights"""
555
+ if isinstance(module, (nn.Linear, nn.Embedding)):
556
+ # Slightly different from the TF version which uses truncated_normal for initialization
557
+ # cf https://github.com/pytorch/pytorch/pull/5617
558
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
559
+ elif isinstance(module, nn.LayerNorm):
560
+ module.bias.data.zero_()
561
+ module.weight.data.fill_(1.0)
562
+ if isinstance(module, nn.Linear) and module.bias is not None:
563
+ module.bias.data.zero_()
564
+
565
+
566
+ # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571
567
+ class BlipTextModel(BlipTextPreTrainedModel):
568
+ """
569
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
570
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
571
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
572
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder, the `is_decoder` argument
573
+ must be set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
574
+ """
575
+
576
+ def __init__(self, config, add_pooling_layer=True):
577
+ super().__init__(config)
578
+ self.config = config
579
+
580
+ self.embeddings = BlipTextEmbeddings(config)
581
+ self.encoder = BlipTextEncoder(config)
582
+ self.pooler = BlipTextPooler(config) if add_pooling_layer else None
583
+
584
+ self.post_init()
585
+
586
+ def get_input_embeddings(self):
587
+ return self.embeddings.word_embeddings
588
+
589
+ def set_input_embeddings(self, value):
590
+ self.embeddings.word_embeddings = value
591
+
592
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
593
+ def _prune_heads(self, heads_to_prune):
594
+ """
595
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
596
+ class PreTrainedModel
597
+ """
598
+ for layer, heads in heads_to_prune.items():
599
+ self.encoder.layer[layer].attention.prune_heads(heads)
600
+
601
+ def get_extended_attention_mask(
602
+ self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool
603
+ ) -> Tensor:
604
+ """
605
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
606
+
607
+ Arguments:
608
+ attention_mask (`torch.Tensor`):
609
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
610
+ input_shape (`Tuple[int]`):
611
+ The shape of the input to the model.
612
+ device (`torch.device`):
613
+ The device of the input to the model.
614
+
615
+ Returns:
616
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
617
+ """
618
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
619
+ # ourselves in which case we just need to make it broadcastable to all heads.
620
+ if attention_mask.dim() == 3:
621
+ extended_attention_mask = attention_mask[:, None, :, :]
622
+ elif attention_mask.dim() == 2:
623
+ # Provided a padding mask of dimensions [batch_size, seq_length]
624
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
625
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
626
+ if is_decoder:
627
+ batch_size, seq_length = input_shape
628
+
629
+ seq_ids = torch.arange(seq_length, device=device)
630
+ causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
631
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
632
+ # causal and attention masks must have same type with pytorch version < 1.3
633
+ causal_mask = causal_mask.to(attention_mask.dtype)
634
+
635
+ if causal_mask.shape[1] < attention_mask.shape[1]:
636
+ prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
637
+ causal_mask = torch.cat(
638
+ [
639
+ torch.ones(
640
+ (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
641
+ ),
642
+ causal_mask,
643
+ ],
644
+ axis=-1,
645
+ )
646
+
647
+ extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
648
+ else:
649
+ extended_attention_mask = attention_mask[:, None, None, :]
650
+ else:
651
+ raise ValueError(
652
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
653
+ input_shape, attention_mask.shape
654
+ )
655
+ )
656
+
657
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
658
+ # masked positions, this operation will create a tensor which is 0.0 for
659
+ # positions we want to attend and -10000.0 for masked positions.
660
+ # Since we are adding it to the raw scores before the softmax, this is
661
+ # effectively the same as removing these entirely.
662
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
663
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
664
+ return extended_attention_mask
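A standalone toy illustration (made-up sizes, plain PyTorch, not part of this file) of the decoder branch above: the lower-triangular causal mask is combined with the padding mask and converted into additive scores:
```python
# Toy version of the decoder mask construction in get_extended_attention_mask;
# the batch size, sequence length and padding pattern are invented.
import torch

batch_size, seq_length = 1, 4
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding

seq_ids = torch.arange(seq_length)
causal_mask = (seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]).long()

extended = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
additive = (1.0 - extended.float()) * -10000.0  # 0.0 where attending is allowed, -10000.0 elsewhere
```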
665
+
666
+ def forward(
667
+ self,
668
+ input_ids: Optional[torch.Tensor] = None,
669
+ attention_mask: Optional[torch.Tensor] = None,
670
+ position_ids: Optional[torch.Tensor] = None,
671
+ head_mask: Optional[torch.Tensor] = None,
672
+ inputs_embeds: Optional[torch.Tensor] = None,
673
+ encoder_embeds: Optional[torch.Tensor] = None,
674
+ encoder_hidden_states: Optional[torch.Tensor] = None,
675
+ encoder_attention_mask: Optional[torch.Tensor] = None,
676
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
677
+ use_cache: Optional[bool] = None,
678
+ output_attentions: Optional[bool] = None,
679
+ output_hidden_states: Optional[bool] = None,
680
+ return_dict: Optional[bool] = None,
681
+ is_decoder: Optional[bool] = False,
682
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
683
+ r"""
684
+ encoder_hidden_states (`torch.FloatTensor`, *optional*):
685
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
686
+ the model is configured as a decoder.
687
+ encoder_attention_mask (`torch.FloatTensor`, *optional*):
688
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
689
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
690
+ - 1 for tokens that are **not masked**,
691
+ - 0 for tokens that are **masked**.
692
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*):
693
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
694
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
695
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
696
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
697
+ use_cache (`bool`, *optional*):
698
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
699
+ `past_key_values`).
700
+ """
701
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
702
+ output_hidden_states = (
703
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
704
+ )
705
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
706
+
707
+ if is_decoder:
708
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
709
+ else:
710
+ use_cache = False
711
+
712
+ if input_ids is not None and inputs_embeds is not None:
713
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
714
+ elif input_ids is not None:
715
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
716
+ input_shape = input_ids.size()
717
+ batch_size, seq_length = input_shape
718
+ device = input_ids.device
719
+ elif inputs_embeds is not None:
720
+ input_shape = inputs_embeds.size()[:-1]
721
+ batch_size, seq_length = input_shape
722
+ device = inputs_embeds.device
723
+ elif encoder_embeds is not None:
724
+ input_shape = encoder_embeds.size()[:-1]
725
+ batch_size, seq_length = input_shape
726
+ device = encoder_embeds.device
727
+ else:
728
+ raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
729
+
730
+ # past_key_values_length
731
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
732
+
733
+ if attention_mask is None:
734
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length))).to(device)
735
+
736
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
737
+ # ourselves in which case we just need to make it broadcastable to all heads.
738
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
739
+ attention_mask, input_shape, device, is_decoder
740
+ )
741
+
742
+ # If a 2D or 3D attention mask is provided for the cross-attention
743
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
744
+ if encoder_hidden_states is not None:
745
+ if isinstance(encoder_hidden_states, list):
746
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
747
+ else:
748
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
749
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
750
+
751
+ if isinstance(encoder_attention_mask, list):
752
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
753
+ elif encoder_attention_mask is None:
754
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
755
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
756
+ else:
757
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
758
+ else:
759
+ encoder_extended_attention_mask = None
760
+
761
+ # Prepare head mask if needed
762
+ # 1.0 in head_mask indicate we keep the head
763
+ # attention_probs has shape bsz x n_heads x N x N
764
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
765
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
766
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
767
+
768
+ if encoder_embeds is None:
769
+ embedding_output = self.embeddings(
770
+ input_ids=input_ids,
771
+ position_ids=position_ids,
772
+ inputs_embeds=inputs_embeds,
773
+ past_key_values_length=past_key_values_length,
774
+ )
775
+ else:
776
+ embedding_output = encoder_embeds
777
+
778
+ encoder_outputs = self.encoder(
779
+ embedding_output,
780
+ attention_mask=extended_attention_mask,
781
+ head_mask=head_mask,
782
+ encoder_hidden_states=encoder_hidden_states,
783
+ encoder_attention_mask=encoder_extended_attention_mask,
784
+ past_key_values=past_key_values,
785
+ use_cache=use_cache,
786
+ output_attentions=output_attentions,
787
+ output_hidden_states=output_hidden_states,
788
+ return_dict=return_dict,
789
+ )
790
+ sequence_output = encoder_outputs[0]
791
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
792
+
793
+ if not return_dict:
794
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
795
+
796
+ return BaseModelOutputWithPoolingAndCrossAttentions(
797
+ last_hidden_state=sequence_output,
798
+ pooler_output=pooled_output,
799
+ past_key_values=encoder_outputs.past_key_values,
800
+ hidden_states=encoder_outputs.hidden_states,
801
+ attentions=encoder_outputs.attentions,
802
+ cross_attentions=encoder_outputs.cross_attentions,
803
+ )
804
+
805
+
806
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811
807
+ class BlipTextLMHeadModel(BlipTextPreTrainedModel):
808
+ def __init__(self, config):
809
+ super().__init__(config)
810
+
811
+ self.bert = BlipTextModel(config, add_pooling_layer=False)
812
+ self.cls = BlipTextOnlyMLMHead(config)
813
+ self.label_smoothing = config.label_smoothing
814
+
815
+ def get_output_embeddings(self):
816
+ return self.cls.predictions.decoder
817
+
818
+ def set_output_embeddings(self, new_embeddings):
819
+ self.cls.predictions.decoder = new_embeddings
820
+
821
+ def forward(
822
+ self,
823
+ input_ids: Optional[torch.Tensor] = None,
824
+ attention_mask: Optional[torch.Tensor] = None,
825
+ position_ids: Optional[torch.Tensor] = None,
826
+ head_mask: Optional[torch.Tensor] = None,
827
+ inputs_embeds: Optional[torch.Tensor] = None,
828
+ encoder_hidden_states: Optional[torch.Tensor] = None,
829
+ encoder_attention_mask: Optional[torch.Tensor] = None,
830
+ labels: Optional[torch.Tensor] = None,
831
+ past_key_values: Optional[List[torch.Tensor]] = None,
832
+ use_cache: Optional[bool] = None,
833
+ output_attentions: Optional[bool] = None,
834
+ output_hidden_states: Optional[bool] = None,
835
+ return_dict: Optional[bool] = None,
836
+ return_logits: Optional[bool] = False,
837
+ is_decoder: Optional[bool] = True,
838
+ reduction: Optional[str] = "mean",
839
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
840
+ r"""
841
+ encoder_hidden_states (`torch.FloatTensor`, *optional*): Sequence of
842
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is
843
+ configured as a decoder.
844
+ encoder_attention_mask (`torch.FloatTensor`, *optional*):
845
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
846
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
847
+ - 1 for tokens that are **not masked**,
848
+ - 0 for tokens that are **masked**.
849
+ labels (`torch.LongTensor`, *optional*):
850
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
851
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
852
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
853
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*):
854
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
855
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
856
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
857
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
858
+ use_cache (`bool`, *optional*):
859
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
860
+ `past_key_values`).
861
+ """
862
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
863
+ if labels is not None:
864
+ use_cache = False
865
+
866
+ outputs = self.bert(
867
+ input_ids,
868
+ attention_mask=attention_mask,
869
+ position_ids=position_ids,
870
+ head_mask=head_mask,
871
+ inputs_embeds=inputs_embeds,
872
+ encoder_hidden_states=encoder_hidden_states,
873
+ encoder_attention_mask=encoder_attention_mask,
874
+ past_key_values=past_key_values,
875
+ use_cache=use_cache,
876
+ output_attentions=output_attentions,
877
+ output_hidden_states=output_hidden_states,
878
+ return_dict=return_dict,
879
+ is_decoder=is_decoder,
880
+ )
881
+
882
+ sequence_output = outputs[0]
883
+ prediction_scores = self.cls(sequence_output)
884
+
885
+ if return_logits:
886
+ return prediction_scores[:, :-1, :].contiguous()
887
+
888
+ lm_loss = None
889
+ if labels is not None:
890
+ # we are doing next-token prediction; shift prediction scores and input ids by one
891
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
892
+ labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device)
893
+ loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing)
894
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
895
+ if reduction == "none":
896
+ lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
897
+
898
+ if not return_dict:
899
+ output = (prediction_scores,) + outputs[2:]
900
+ return ((lm_loss,) + output) if lm_loss is not None else output
901
+
902
+ return CausalLMOutputWithCrossAttentions(
903
+ loss=lm_loss,
904
+ logits=prediction_scores,
905
+ past_key_values=outputs.past_key_values,
906
+ hidden_states=outputs.hidden_states,
907
+ attentions=outputs.attentions,
908
+ cross_attentions=outputs.cross_attentions,
909
+ )
910
+
911
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
912
+ input_shape = input_ids.shape
913
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
914
+ if attention_mask is None:
915
+ attention_mask = input_ids.new_ones(input_shape)
916
+
917
+ # cut decoder_input_ids if past_key_values is used
918
+ if past_key_values is not None:
919
+ past_length = past_key_values[0][0].shape[2]
920
+
921
+ # Some generation methods already pass only the last input ID
922
+ if input_ids.shape[1] > past_length:
923
+ remove_prefix_length = past_length
924
+ else:
925
+ # Default to old behavior: keep only final ID
926
+ remove_prefix_length = input_ids.shape[1] - 1
927
+
928
+ input_ids = input_ids[:, remove_prefix_length:]
929
+
930
+ return {
931
+ "input_ids": input_ids,
932
+ "attention_mask": attention_mask,
933
+ "past_key_values": past_key_values,
934
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
935
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
936
+ "is_decoder": True,
937
+ }
938
+
939
+ def _reorder_cache(self, past_key_values, beam_idx):
940
+ reordered_past = ()
941
+ for layer_past in past_key_values:
942
+ reordered_past += (
943
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
944
+ )
945
+ return reordered_past
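`BlipTextLMHeadModel.forward` above computes a next-token loss by shifting logits and labels one position apart. A self-contained toy sketch of that shift (random tensors, not tied to any BLIP checkpoint or vocabulary):
```python
# Toy sketch of the shifted next-token loss used in BlipTextLMHeadModel.forward;
# all shapes and values below are invented for illustration.
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 11
prediction_scores = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[:, -1] = -100  # padded positions are skipped via CrossEntropyLoss's default ignore_index

# predict token t+1 from position t: drop the last logit and the first label
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

loss_fct = CrossEntropyLoss(reduction="mean", label_smoothing=0.0)
lm_loss = loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
```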
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip.py ADDED
@@ -0,0 +1,1701 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TensorFlow BLIP model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import Any, Optional, Tuple, Union
22
+
23
+ import tensorflow as tf
24
+
25
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
26
+ from ...modeling_tf_utils import (
27
+ TFPreTrainedModel,
28
+ get_initializer,
29
+ get_tf_activation,
30
+ keras,
31
+ keras_serializable,
32
+ shape_list,
33
+ unpack_inputs,
34
+ )
35
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
36
+ from ...utils import (
37
+ ModelOutput,
38
+ add_start_docstrings,
39
+ add_start_docstrings_to_model_forward,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
44
+ from .modeling_tf_blip_text import BLIP_TEXT_INPUTS_DOCSTRING, TFBlipTextLMHeadModel, TFBlipTextModel
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
50
+
51
+
52
+ from ..deprecated._archive_maps import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Copied from transformers.models.clip.modeling_tf_clip.contrastive_loss
56
+ def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
57
+ return tf.math.reduce_mean(
58
+ keras.metrics.sparse_categorical_crossentropy(
59
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
60
+ )
61
+ )
62
+
63
+
64
+ # Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->blip
65
+ def blip_loss(similarity: tf.Tensor) -> tf.Tensor:
66
+ caption_loss = contrastive_loss(similarity)
67
+ image_loss = contrastive_loss(tf.transpose(similarity))
68
+ return (caption_loss + image_loss) / 2.0
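A toy check (random logits, TensorFlow, not part of this file) of what `blip_loss` computes: the diagonal of the similarity matrix is treated as the correct image-text pairings and the two directional cross-entropies are averaged:
```python
# Toy illustration of the symmetric contrastive loss defined above; the 4x4 similarity
# matrix is random and only meant to show the expected shapes and targets.
import tensorflow as tf

batch = 4
similarity = tf.random.normal((batch, batch))  # pairwise image-text logits
targets = tf.range(batch)                      # the i-th text matches the i-th image

caption_loss = tf.reduce_mean(
    tf.keras.losses.sparse_categorical_crossentropy(targets, similarity, from_logits=True)
)
image_loss = tf.reduce_mean(
    tf.keras.losses.sparse_categorical_crossentropy(targets, tf.transpose(similarity), from_logits=True)
)
loss = (caption_loss + image_loss) / 2.0  # matches blip_loss(similarity)
```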
69
+
70
+
71
+ @dataclass
72
+ class TFBlipForConditionalGenerationModelOutput(ModelOutput):
73
+ """
74
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
75
+ last hidden states. This class also adds the loss term from the text decoder.
76
+
77
+ Args:
78
+ loss (`tf.Tensor`, *optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
79
+ Language modeling loss from the text decoder.
80
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
81
+ Prediction scores of the language modeling head of the text decoder model.
82
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*):
83
+ The image embeddings obtained after applying the Vision Transformer model to the input image.
84
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
85
+ Sequence of hidden-states at the output of the last layer of the model.
86
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True`):
87
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
88
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
89
+
90
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
91
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed):
92
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
93
+ sequence_length)`.
94
+
95
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
96
+ heads.
97
+ """
98
+
99
+ loss: Tuple[tf.Tensor] | None = None
100
+ logits: Tuple[tf.Tensor] | None = None
101
+ image_embeds: tf.Tensor | None = None
102
+ last_hidden_state: tf.Tensor = None
103
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
104
+ attentions: Tuple[tf.Tensor, ...] | None = None
105
+
106
+ @property
107
+ def decoder_logits(self):
108
+ warnings.warn(
109
+ "`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
110
+ " Please use the `logits` attribute to retrieve the final output instead.",
111
+ FutureWarning,
112
+ )
113
+ return self.logits
114
+
115
+
116
+ @dataclass
117
+ class TFBlipTextVisionModelOutput(ModelOutput):
118
+ """
119
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
120
+ last hidden states. This class also adds the loss term from the text decoder.
121
+
122
+ Args:
123
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
124
+ Language modeling loss from the text decoder.
125
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
126
+ The image embeddings obtained by applying the projection layer to the pooler_output.
127
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
128
+ Sequence of hidden-states at the output of the last layer of the model.
129
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
130
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
131
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
132
+
133
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
134
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
135
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
136
+ sequence_length)`.
137
+
138
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
139
+ heads.
140
+ """
141
+
142
+ loss: tf.Tensor | None = None
143
+ image_embeds: tf.Tensor | None = None
144
+ last_hidden_state: tf.Tensor = None
145
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
146
+ attentions: Tuple[tf.Tensor, ...] | None = None
147
+
148
+
149
+ @dataclass
150
+ class TFBlipImageTextMatchingModelOutput(ModelOutput):
151
+ """
152
+ Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
153
+ last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
154
+ scores.
155
+
156
+ Args:
157
+ itm_score (`tf.Tensor`):
158
+ The image-text similarity scores.
159
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
160
+ Language modeling loss from the text decoder.
161
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
162
+ The image embeddings obtained by applying the projection layer to the pooler_output.
163
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
164
+ Sequence of hidden-states at the output of the last layer of the model.
165
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
166
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
167
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
168
+
169
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
170
+ vision_pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*):
171
+ Last layer hidden-state of the vision-only branch of the model.
172
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
173
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
174
+ sequence_length)`.
175
+
176
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
177
+ heads.
178
+ question_embeds (`tf.Tensor`):
179
+ The question embeddings obtained by the text projection layer.
180
+ """
181
+
182
+ itm_score: tf.Tensor | None = None
183
+ loss: tf.Tensor | None = None
184
+ image_embeds: tf.Tensor | None = None
185
+ last_hidden_state: tf.Tensor = None
186
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
187
+ vision_pooler_output: tf.Tensor | None = None
188
+ attentions: Tuple[tf.Tensor, ...] | None = None
189
+ question_embeds: Tuple[tf.Tensor] | None = None
190
+
191
+
192
+ @dataclass
193
+ class TFBlipOutput(ModelOutput):
194
+ """
195
+ Args:
196
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
197
+ Contrastive loss for image-text similarity.
198
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
199
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
200
+ similarity scores.
201
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
202
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
203
+ similarity scores.
204
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
205
+ The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
206
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
207
+ The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
208
+ text_model_output (`TFBaseModelOutputWithPooling`):
209
+ The output of the [`BlipTextModel`].
210
+ vision_model_output (`TFBaseModelOutputWithPooling`):
211
+ The output of the [`BlipVisionModel`].
212
+ """
213
+
214
+ loss: tf.Tensor | None = None
215
+ logits_per_image: tf.Tensor = None
216
+ logits_per_text: tf.Tensor = None
217
+ text_embeds: tf.Tensor = None
218
+ image_embeds: tf.Tensor = None
219
+ text_model_output: TFBaseModelOutputWithPooling = None
220
+ vision_model_output: TFBaseModelOutputWithPooling = None
221
+
222
+ def to_tuple(self) -> Tuple[Any]:
223
+ return tuple(
224
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
225
+ for k in self.keys()
226
+ )
227
+
228
+
229
+ class TFBlipVisionEmbeddings(keras.layers.Layer):
230
+ def __init__(self, config: BlipVisionConfig, **kwargs):
231
+ super().__init__(**kwargs)
232
+ self.config = config
233
+ self.embed_dim = config.hidden_size
234
+ self.image_size = config.image_size
235
+ self.patch_size = config.patch_size
236
+
237
+ self.patch_embedding = keras.layers.Conv2D(
238
+ filters=self.embed_dim,
239
+ kernel_size=self.patch_size,
240
+ strides=self.patch_size,
241
+ kernel_initializer=get_initializer(self.config.initializer_range),
242
+ data_format="channels_last",
243
+ name="patch_embedding",
244
+ )
245
+
246
+ self.num_patches = (self.image_size // self.patch_size) ** 2
247
+ self.num_positions = self.num_patches + 1
248
+
249
+ def build(self, input_shape=None):
250
+ self.class_embedding = self.add_weight(
251
+ shape=(1, 1, self.embed_dim),
252
+ initializer=get_initializer(self.config.initializer_range),
253
+ trainable=True,
254
+ name="class_embedding",
255
+ )
256
+
257
+ self.position_embedding = self.add_weight(
258
+ shape=(1, self.num_positions, self.embed_dim),
259
+ initializer=get_initializer(self.config.initializer_range),
260
+ trainable=True,
261
+ name="position_embedding",
262
+ )
263
+
264
+ if self.built:
265
+ return
266
+ self.built = True
267
+ if getattr(self, "patch_embedding", None) is not None:
268
+ with tf.name_scope(self.patch_embedding.name):
269
+ self.patch_embedding.build([None, None, None, 3])
270
+
271
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
272
+ # Input is channels-first, we transpose. PyTorch transposes after the conv because PyTorch
273
+ # likes channels-first convs.
274
+ batch_size = tf.shape(pixel_values)[0]
275
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
276
+ patch_embeds = self.patch_embedding(pixel_values)
277
+ patch_embeds = tf.reshape(patch_embeds, (batch_size, self.num_patches, -1))
278
+
279
+ class_embeds = tf.broadcast_to(self.class_embedding, (batch_size, 1, self.embed_dim))
280
+ embeddings = tf.concat([class_embeds, patch_embeds], axis=1)
281
+ embeddings = embeddings + self.position_embedding[:, : tf.shape(embeddings)[1], :]
282
+ return embeddings
283
+
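+ # Illustrative shape walkthrough for TFBlipVisionEmbeddings.call (a sketch only; the concrete numbers
+ # assume the default BLIP-base vision config with image_size=384, patch_size=16, hidden_size=768,
+ # which is not asserted anywhere in this file):
+ #   pixel_values (batch, 3, 384, 384) --transpose--> (batch, 384, 384, 3)
+ #   patch_embedding (16x16 conv, stride 16)          -> (batch, 24, 24, 768)
+ #   reshape to patches                               -> (batch, 576, 768)
+ #   prepend class token and add position embeddings  -> (batch, 577, 768)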
284
+
285
+ # Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->Blip
286
+ class TFBlipTextEmbeddings(keras.layers.Layer):
287
+ def __init__(self, config: BlipTextConfig, **kwargs):
288
+ super().__init__(**kwargs)
289
+
290
+ self.embed_dim = config.hidden_size
291
+
292
+ self.config = config
293
+
294
+ def build(self, input_shape: tf.TensorShape = None):
295
+ with tf.name_scope("token_embedding"):
296
+ self.weight = self.add_weight(
297
+ shape=(self.config.vocab_size, self.embed_dim),
298
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
299
+ trainable=True,
300
+ name="weight",
301
+ )
302
+
303
+ with tf.name_scope("position_embedding"):
304
+ self.position_embedding = self.add_weight(
305
+ shape=(self.config.max_position_embeddings, self.embed_dim),
306
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
307
+ trainable=True,
308
+ name="embeddings",
309
+ )
310
+
311
+ super().build(input_shape)
312
+
313
+ def call(
314
+ self,
315
+ input_ids: tf.Tensor = None,
316
+ position_ids: tf.Tensor = None,
317
+ inputs_embeds: tf.Tensor = None,
318
+ ) -> tf.Tensor:
319
+ """
320
+ Applies embedding based on inputs tensor.
321
+
322
+ Returns:
323
+ final_embeddings (`tf.Tensor`): output embedding tensor.
324
+ """
325
+ if input_ids is None and inputs_embeds is None:
326
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
327
+
328
+ if inputs_embeds is None:
329
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
330
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
331
+
332
+ input_shape = shape_list(inputs_embeds)[:-1]
333
+
334
+ if position_ids is None:
335
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
336
+
337
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
338
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
339
+ final_embeddings = inputs_embeds + position_embeds
340
+
341
+ return final_embeddings
342
+
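+ # Illustrative trace for TFBlipTextEmbeddings.call (assumed shapes, not taken from the original file):
+ #   input_ids (2, 5) --tf.gather(self.weight, ...)--> inputs_embeds (2, 5, embed_dim)
+ #   position_ids default to tf.range(5)[None, :]; the gathered position embeddings are tiled to
+ #   (2, 5, embed_dim) and added element-wise to the token embeddings.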
343
+
344
+ class TFBlipAttention(keras.layers.Layer):
345
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
346
+
347
+ def __init__(self, config, **kwargs):
348
+ super().__init__(**kwargs)
349
+ self.config = config
350
+ self.embed_dim = config.hidden_size
351
+ self.num_heads = config.num_attention_heads
352
+ self.head_dim = self.embed_dim // self.num_heads
353
+ if self.head_dim * self.num_heads != self.embed_dim:
354
+ raise ValueError(
355
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
356
+ f" {self.num_heads})."
357
+ )
358
+ self.scale = self.head_dim**-0.5
359
+ self.dropout = keras.layers.Dropout(config.attention_dropout, name="dropout")
360
+
361
+ self.qkv = keras.layers.Dense(
362
+ 3 * self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="qkv"
363
+ )
364
+
365
+ self.projection = keras.layers.Dense(
366
+ self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="projection"
367
+ )
368
+
369
+ def call(
370
+ self,
371
+ hidden_states: tf.Tensor,
372
+ head_mask: tf.Tensor | None = None,
373
+ output_attentions: Optional[bool] = False,
374
+ training: Optional[bool] = None,
375
+ ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[tf.Tensor] | None]:
376
+ """Input shape: Batch x Time x Channel"""
377
+
378
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
379
+
380
+ mixed_qkv = self.qkv(hidden_states)
381
+ mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, self.num_heads, self.head_dim))
382
+ mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4))
383
+
384
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
385
+
386
+ # Take the dot product between "query" and "key" to get the raw attention scores.
387
+ attention_scores = query_states @ tf.transpose(key_states, (0, 1, 3, 2))
388
+
389
+ attention_scores = attention_scores * self.scale
390
+
391
+ # Normalize the attention scores to probabilities.
392
+ attention_probs = stable_softmax(attention_scores, axis=-1)
393
+
394
+ # This is actually dropping out entire tokens to attend to, which might
395
+ # seem a bit unusual, but is taken from the original Transformer paper.
396
+ attention_probs = self.dropout(attention_probs, training=training)
397
+
398
+ # Mask heads if we want to
399
+ if head_mask is not None:
400
+ attention_probs = attention_probs * head_mask
401
+
402
+ context_layer = tf.transpose(attention_probs @ value_states, perm=(0, 2, 1, 3))
403
+
404
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.embed_dim]
405
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
406
+
407
+ output = self.projection(context_layer)
408
+
409
+ outputs = (output, attention_probs) if output_attentions else (output, None)
410
+
411
+ return outputs
412
+
413
+ def build(self, input_shape=None):
414
+ if self.built:
415
+ return
416
+ self.built = True
417
+ if getattr(self, "dropout", None) is not None:
418
+ with tf.name_scope(self.dropout.name):
419
+ self.dropout.build(None)
420
+ if getattr(self, "qkv", None) is not None:
421
+ with tf.name_scope(self.qkv.name):
422
+ self.qkv.build([None, None, self.embed_dim])
423
+ if getattr(self, "projection", None) is not None:
424
+ with tf.name_scope(self.projection.name):
425
+ self.projection.build([None, None, self.embed_dim])
426
+
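+ # Shape trace for the fused-QKV attention above (illustrative; assumes hidden_states of shape
+ # (bsz, tgt_len, embed_dim) with embed_dim = num_heads * head_dim):
+ #   qkv(hidden_states)                  -> (bsz, tgt_len, 3 * embed_dim)
+ #   reshape + transpose (2, 0, 3, 1, 4) -> (3, bsz, num_heads, tgt_len, head_dim), split into q, k, v
+ #   q @ k^T * scale, softmax, dropout   -> attention_probs (bsz, num_heads, tgt_len, tgt_len)
+ #   probs @ v, transpose, reshape       -> context (bsz, tgt_len, embed_dim), then projection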
427
+
428
+ class TFBlipMLP(keras.layers.Layer):
429
+ def __init__(self, config: BlipConfig, **kwargs):
430
+ super().__init__(**kwargs)
431
+
432
+ self.activation_fn = get_tf_activation(config.hidden_act)
433
+
434
+ in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5)
435
+ fc_std = (2 * config.hidden_size) ** -0.5
436
+
437
+ self.fc1 = keras.layers.Dense(
438
+ units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
439
+ )
440
+ self.fc2 = keras.layers.Dense(
441
+ units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
442
+ )
443
+ self.config = config
444
+
445
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
446
+ hidden_states = self.fc1(inputs=hidden_states)
447
+ hidden_states = self.activation_fn(hidden_states)
448
+ hidden_states = self.fc2(inputs=hidden_states)
449
+ return hidden_states
450
+
451
+ def build(self, input_shape=None):
452
+ if self.built:
453
+ return
454
+ self.built = True
455
+ if getattr(self, "fc1", None) is not None:
456
+ with tf.name_scope(self.fc1.name):
457
+ self.fc1.build([None, None, self.config.hidden_size])
458
+ if getattr(self, "fc2", None) is not None:
459
+ with tf.name_scope(self.fc2.name):
460
+ self.fc2.build([None, None, self.config.intermediate_size])
461
+
462
+
463
+ class TFBlipEncoderLayer(keras.layers.Layer):
464
+ def __init__(self, config: BlipConfig, **kwargs):
465
+ super().__init__(**kwargs)
466
+ self.embed_dim = config.hidden_size
467
+ self.self_attn = TFBlipAttention(config, name="self_attn")
468
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
469
+ self.mlp = TFBlipMLP(config, name="mlp")
470
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
471
+
472
+ def call(
473
+ self,
474
+ hidden_states: tf.Tensor,
475
+ attention_mask: tf.Tensor,
476
+ output_attentions: Optional[bool] = False,
477
+ training: Optional[bool] = None,
478
+ ) -> Tuple[tf.Tensor]:
479
+ """
480
+ Args:
481
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
482
+ attention_mask (`tf.Tensor`): attention mask of size
483
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
484
+
485
+ output_attentions (`bool`, *optional*):
486
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
487
+ returned tensors for more detail.
488
+ """
489
+ residual = hidden_states
490
+
491
+ hidden_states = self.layer_norm1(hidden_states)
492
+ hidden_states, attn_weights = self.self_attn(
493
+ hidden_states=hidden_states,
494
+ head_mask=attention_mask,
495
+ output_attentions=output_attentions,
496
+ training=training,
497
+ )
498
+ hidden_states = hidden_states + residual
499
+ residual = hidden_states
500
+ hidden_states = self.layer_norm2(hidden_states)
501
+ hidden_states = self.mlp(hidden_states)
502
+
503
+ hidden_states = hidden_states + residual
504
+
505
+ outputs = (hidden_states,)
506
+
507
+ if output_attentions:
508
+ outputs += (attn_weights,)
509
+
510
+ return outputs
511
+
512
+ def build(self, input_shape=None):
513
+ if self.built:
514
+ return
515
+ self.built = True
516
+ if getattr(self, "self_attn", None) is not None:
517
+ with tf.name_scope(self.self_attn.name):
518
+ self.self_attn.build(None)
519
+ if getattr(self, "layer_norm1", None) is not None:
520
+ with tf.name_scope(self.layer_norm1.name):
521
+ self.layer_norm1.build([None, None, self.embed_dim])
522
+ if getattr(self, "mlp", None) is not None:
523
+ with tf.name_scope(self.mlp.name):
524
+ self.mlp.build(None)
525
+ if getattr(self, "layer_norm2", None) is not None:
526
+ with tf.name_scope(self.layer_norm2.name):
527
+ self.layer_norm2.build([None, None, self.embed_dim])
528
+
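+ # The encoder layer above uses a pre-LayerNorm residual pattern, i.e. conceptually:
+ #   hidden_states = hidden_states + self_attn(layer_norm1(hidden_states))
+ #   hidden_states = hidden_states + mlp(layer_norm2(hidden_states))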
529
+
530
+ class TFBlipPreTrainedModel(TFPreTrainedModel):
531
+ """
532
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
533
+ models.
534
+ """
535
+
536
+ config_class = BlipConfig
537
+ base_model_prefix = "blip"
538
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
539
+
540
+
541
+ BLIP_START_DOCSTRING = r"""
542
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
543
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
544
+ etc.)
545
+
546
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
547
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
548
+ behavior.
549
+
550
+ Parameters:
551
+ config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
552
+ Initializing with a config file does not load the weights associated with the model, only the
553
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
554
+ """
555
+
556
+ BLIP_VISION_INPUTS_DOCSTRING = r"""
557
+ Args:
558
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
559
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
560
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
561
+ output_attentions (`bool`, *optional*):
562
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
563
+ tensors for more detail.
564
+ output_hidden_states (`bool`, *optional*):
565
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
566
+ more detail.
567
+ return_dict (`bool`, *optional*):
568
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
569
+ """
570
+
571
+ BLIP_INPUTS_DOCSTRING = r"""
572
+ Args:
573
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
574
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
575
+ it.
576
+
577
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
578
+
579
+ [What are input IDs?](../glossary#input-ids)
580
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
581
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
582
+
583
+ - 1 for tokens that are **not masked**,
584
+ - 0 for tokens that are **masked**.
585
+
586
+ [What are attention masks?](../glossary#attention-mask)
587
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
588
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
589
+ config.max_position_embeddings - 1]`.
590
+
591
+ [What are position IDs?](../glossary#position-ids)
592
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
593
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
594
+ [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
595
+ return_loss (`bool`, *optional*):
596
+ Whether or not to return the contrastive loss.
597
+ output_attentions (`bool`, *optional*):
598
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
599
+ tensors for more detail.
600
+ output_hidden_states (`bool`, *optional*):
601
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
602
+ more detail.
603
+ return_dict (`bool`, *optional*):
604
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
605
+ """
606
+
607
+
608
+ @keras_serializable
609
+ class TFBlipEncoder(keras.layers.Layer):
610
+ config_class = BlipConfig
611
+ """
612
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
613
+ [`TFBlipEncoderLayer`].
614
+
615
+ Args:
616
+ config (`BlipConfig`):
617
+ The corresponding vision configuration for the `BlipEncoder`.
618
+ """
619
+
620
+ def __init__(self, config: BlipConfig, **kwargs):
621
+ super().__init__(**kwargs)
622
+ self.config = config
623
+ self.layers = [TFBlipEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
624
+
625
+ @unpack_inputs
626
+ def call(
627
+ self,
628
+ inputs_embeds,
629
+ attention_mask: tf.Tensor | None = None,
630
+ output_attentions: Optional[bool] = None,
631
+ output_hidden_states: Optional[bool] = None,
632
+ return_dict: Optional[bool] = None,
633
+ training: Optional[bool] = None,
634
+ ) -> Union[Tuple, TFBaseModelOutput]:
635
+ r"""
636
+ Args:
637
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
638
+ Embedded representation of the inputs. Should be float, not int tokens.
639
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
640
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
641
+
642
+ - 1 for tokens that are **not masked**,
643
+ - 0 for tokens that are **masked**.
644
+
645
+ [What are attention masks?](../glossary#attention-mask)
646
+ output_attentions (`bool`, *optional*):
647
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
648
+ returned tensors for more detail.
649
+ output_hidden_states (`bool`, *optional*):
650
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
651
+ for more detail.
652
+ return_dict (`bool`, *optional*):
653
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
654
+ """
655
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
656
+ output_hidden_states = (
657
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
658
+ )
659
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
660
+
661
+ encoder_states = () if output_hidden_states else None
662
+ all_attentions = () if output_attentions else None
663
+
664
+ hidden_states = inputs_embeds
665
+ for idx, encoder_layer in enumerate(self.layers):
666
+ if output_hidden_states:
667
+ encoder_states = encoder_states + (hidden_states,)
668
+ layer_outputs = encoder_layer(
669
+ hidden_states,
670
+ attention_mask,
671
+ output_attentions=output_attentions,
672
+ training=training,
673
+ )
674
+
675
+ hidden_states = layer_outputs[0]
676
+
677
+ if output_attentions:
678
+ all_attentions = all_attentions + (layer_outputs[1],)
679
+
680
+ if output_hidden_states:
681
+ encoder_states = encoder_states + (hidden_states,)
682
+
683
+ if not return_dict:
684
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
685
+ return TFBaseModelOutput(
686
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
687
+ )
688
+
689
+ def build(self, input_shape=None):
690
+ if self.built:
691
+ return
692
+ self.built = True
693
+ if getattr(self, "layers", None) is not None:
694
+ for layer in self.layers:
695
+ with tf.name_scope(layer.name):
696
+ layer.build(None)
697
+
698
+
699
+ class TFBlipVisionModel(TFBlipPreTrainedModel):
700
+ main_input_name = "pixel_values"
701
+ config_class = BlipVisionConfig
702
+
703
+ def __init__(self, config: BlipVisionConfig, *args, **kwargs):
704
+ super().__init__(config, *args, **kwargs)
705
+ self.config = config
706
+
707
+ self.embeddings = TFBlipVisionEmbeddings(config, name="embeddings")
708
+ self.encoder = TFBlipEncoder(config, name="encoder")
709
+ self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm")
710
+ self.embed_dim = config.hidden_size
711
+
712
+ def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
713
+ hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
714
+ attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
715
+
716
+ return TFBaseModelOutputWithPooling(
717
+ last_hidden_state=output.last_hidden_state,
718
+ pooler_output=output.pooler_output,
719
+ hidden_states=hs,
720
+ attentions=attns,
721
+ )
722
+
723
+ @unpack_inputs
724
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
725
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=BlipVisionConfig)
726
+ def call(
727
+ self,
728
+ pixel_values: tf.Tensor | None = None,
729
+ output_attentions: Optional[bool] = None,
730
+ output_hidden_states: Optional[bool] = None,
731
+ return_dict: Optional[bool] = None,
732
+ training: Optional[bool] = None,
733
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
734
+ r"""
735
+ Returns:
736
+
737
+ """
738
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
739
+ output_hidden_states = (
740
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
741
+ )
742
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
743
+
744
+ if pixel_values is None:
745
+ raise ValueError("You have to specify pixel_values")
746
+
747
+ hidden_states = self.embeddings(pixel_values)
748
+
749
+ encoder_outputs = self.encoder(
750
+ inputs_embeds=hidden_states,
751
+ output_attentions=output_attentions,
752
+ output_hidden_states=output_hidden_states,
753
+ return_dict=return_dict,
754
+ training=training,
755
+ )
756
+
757
+ last_hidden_state = encoder_outputs[0]
758
+ last_hidden_state = self.post_layernorm(last_hidden_state)
759
+
760
+ pooled_output = last_hidden_state[:, 0, :]
761
+ # TF gets confused if we call the layer with inputs of different ranks, so insert a singleton dimension
762
+ pooled_output = self.post_layernorm(tf.expand_dims(pooled_output, 1))
763
+ pooled_output = tf.squeeze(pooled_output, 1)
764
+
765
+ if not return_dict:
766
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
767
+
768
+ return TFBaseModelOutputWithPooling(
769
+ last_hidden_state=last_hidden_state,
770
+ pooler_output=pooled_output,
771
+ hidden_states=encoder_outputs.hidden_states,
772
+ attentions=encoder_outputs.attentions,
773
+ )
774
+
775
+ def get_input_embeddings(self):
776
+ return self.embeddings
777
+
778
+ def build(self, input_shape=None):
779
+ if self.built:
780
+ return
781
+ self.built = True
782
+ if getattr(self, "embeddings", None) is not None:
783
+ with tf.name_scope(self.embeddings.name):
784
+ self.embeddings.build(None)
785
+ if getattr(self, "encoder", None) is not None:
786
+ with tf.name_scope(self.encoder.name):
787
+ self.encoder.build(None)
788
+ if getattr(self, "post_layernorm", None) is not None:
789
+ with tf.name_scope(self.post_layernorm.name):
790
+ self.post_layernorm.build([None, None, self.embed_dim])
791
+
792
+
793
+ class TFBlipMainLayer(keras.layers.Layer):
794
+ config_class = BlipConfig
795
+
796
+ def __init__(self, config: BlipConfig, *args, **kwargs):
797
+ super().__init__(*args, **kwargs)
798
+
799
+ if not isinstance(config.text_config, BlipTextConfig):
800
+ raise ValueError(
801
+ "config.text_config is expected to be of type BlipTextConfig but is of type"
802
+ f" {type(config.text_config)}."
803
+ )
804
+
805
+ if not isinstance(config.vision_config, BlipVisionConfig):
806
+ raise ValueError(
807
+ "config.vision_config is expected to be of type BlipVisionConfig but is of type"
808
+ f" {type(config.vision_config)}."
809
+ )
810
+
811
+ text_config = config.text_config
812
+ vision_config = config.vision_config
813
+
814
+ self.projection_dim = config.projection_dim
815
+ self.text_embed_dim = text_config.hidden_size
816
+ self.vision_embed_dim = vision_config.hidden_size
817
+
818
+ self.text_model = TFBlipTextModel(text_config, name="text_model")
819
+ self.vision_model = TFBlipVisionModel(vision_config, name="vision_model")
820
+
821
+ self.visual_projection = keras.layers.Dense(
822
+ self.projection_dim,
823
+ use_bias=False,
824
+ kernel_initializer=get_initializer(config.initializer_range),
825
+ name="visual_projection",
826
+ )
827
+ self.text_projection = keras.layers.Dense(
828
+ self.projection_dim,
829
+ use_bias=False,
830
+ kernel_initializer=get_initializer(config.initializer_range),
831
+ name="text_projection",
832
+ )
833
+
834
+ self.config = config
835
+
836
+ def build(self, input_shape=None):
837
+ self.logit_scale = self.add_weight(
838
+ name="logit_scale",
839
+ shape=[],
840
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
841
+ trainable=True,
842
+ )
843
+
844
+ if self.built:
845
+ return
846
+ self.built = True
847
+ if getattr(self, "text_model", None) is not None:
848
+ with tf.name_scope(self.text_model.name):
849
+ self.text_model.build(None)
850
+ if getattr(self, "vision_model", None) is not None:
851
+ with tf.name_scope(self.vision_model.name):
852
+ self.vision_model.build(None)
853
+ if getattr(self, "visual_projection", None) is not None:
854
+ with tf.name_scope(self.visual_projection.name):
855
+ self.visual_projection.build([None, None, self.vision_embed_dim])
856
+ if getattr(self, "text_projection", None) is not None:
857
+ with tf.name_scope(self.text_projection.name):
858
+ self.text_projection.build([None, None, self.text_embed_dim])
859
+
860
+ @unpack_inputs
861
+ def call(
862
+ self,
863
+ input_ids: tf.Tensor | None = None,
864
+ pixel_values: tf.Tensor | None = None,
865
+ attention_mask: tf.Tensor | None = None,
866
+ position_ids: tf.Tensor | None = None,
867
+ return_loss: Optional[bool] = None,
868
+ output_attentions: Optional[bool] = None,
869
+ output_hidden_states: Optional[bool] = None,
870
+ return_dict: Optional[bool] = None,
871
+ training: Optional[bool] = None,
872
+ ) -> Union[Tuple, TFBlipOutput]:
873
+ # Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
874
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
875
+ output_hidden_states = (
876
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
877
+ )
878
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
879
+
880
+ vision_outputs = self.vision_model(
881
+ pixel_values=pixel_values,
882
+ output_attentions=output_attentions,
883
+ output_hidden_states=output_hidden_states,
884
+ return_dict=return_dict,
885
+ training=training,
886
+ )
887
+
888
+ text_outputs = self.text_model(
889
+ input_ids=input_ids,
890
+ attention_mask=attention_mask,
891
+ position_ids=position_ids,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ training=training,
896
+ )
897
+
898
+ image_embeds = vision_outputs[1]
899
+ image_embeds = self.visual_projection(image_embeds)
900
+
901
+ text_embeds = text_outputs[1]
902
+ text_embeds = self.text_projection(text_embeds)
903
+
904
+ # normalized features
905
+ image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True)
906
+ text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True)
907
+
908
+ # cosine similarity as logits
909
+ logit_scale = tf.exp(self.logit_scale)
910
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
911
+ logits_per_image = tf.transpose(logits_per_text)
912
+
913
+ loss = None
914
+ if return_loss:
915
+ loss = blip_loss(logits_per_text)
916
+ loss = tf.reshape(loss, (1,))
917
+
918
+ if not return_dict:
919
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
920
+ return ((loss,) + output) if loss is not None else output
921
+
922
+ return TFBlipOutput(
923
+ loss=loss,
924
+ logits_per_image=logits_per_image,
925
+ logits_per_text=logits_per_text,
926
+ text_embeds=text_embeds,
927
+ image_embeds=image_embeds,
928
+ text_model_output=text_outputs,
929
+ vision_model_output=vision_outputs,
930
+ )
931
+
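+ # `blip_loss` is referenced above but not defined in this hunk. As a rough sketch of the usual
+ # CLIP-style symmetric contrastive objective such a helper typically computes (an assumption about
+ # the general technique, not a claim about this exact implementation):
+ #   labels = tf.range(tf.shape(logits_per_text)[0])
+ #   loss_t = tf.keras.losses.sparse_categorical_crossentropy(labels, logits_per_text, from_logits=True)
+ #   loss_i = tf.keras.losses.sparse_categorical_crossentropy(labels, tf.transpose(logits_per_text), from_logits=True)
+ #   loss = (tf.reduce_mean(loss_t) + tf.reduce_mean(loss_i)) / 2.0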
932
+
933
+ class TFBlipModel(TFBlipPreTrainedModel):
934
+ config_class = BlipConfig
935
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
936
+ main_input_name = "input_ids"
937
+
938
+ def __init__(self, config: BlipConfig, *inputs, **kwargs):
939
+ super().__init__(config, *inputs, **kwargs)
940
+
941
+ self.blip = TFBlipMainLayer(config, name="blip")
942
+
943
+ def serving_output(self, output: TFBlipOutput) -> TFBlipOutput:
944
+ return TFBlipOutput(
945
+ logits_per_image=output.logits_per_image,
946
+ logits_per_text=output.logits_per_text,
947
+ text_embeds=output.text_embeds,
948
+ image_embeds=output.image_embeds,
949
+ )
950
+
951
+ @unpack_inputs
952
+ @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
953
+ @replace_return_docstrings(output_type=TFBlipOutput, config_class=BlipConfig)
954
+ def call(
955
+ self,
956
+ input_ids: tf.Tensor | None = None,
957
+ pixel_values: tf.Tensor | None = None,
958
+ attention_mask: tf.Tensor | None = None,
959
+ position_ids: tf.Tensor | None = None,
960
+ return_loss: Optional[bool] = None,
961
+ output_attentions: Optional[bool] = None,
962
+ output_hidden_states: Optional[bool] = None,
963
+ return_dict: Optional[bool] = None,
964
+ training: Optional[bool] = None,
965
+ ) -> Union[Tuple, TFBlipOutput]:
966
+ r"""
967
+ Returns:
968
+
969
+ Examples:
970
+
971
+ ```python
972
+ >>> from PIL import Image
973
+ >>> import requests
974
+ >>> from transformers import AutoProcessor, TFBlipModel
975
+
976
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
977
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
978
+
979
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
980
+ >>> image = Image.open(requests.get(url, stream=True).raw)
981
+
982
+ >>> inputs = processor(
983
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
984
+ ... )
985
+
986
+ >>> outputs = model(**inputs)
987
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
988
+ >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
989
+ ```"""
990
+ outputs = self.blip(
991
+ input_ids=input_ids,
992
+ pixel_values=pixel_values,
993
+ attention_mask=attention_mask,
994
+ position_ids=position_ids,
995
+ return_loss=return_loss,
996
+ output_attentions=output_attentions,
997
+ output_hidden_states=output_hidden_states,
998
+ return_dict=return_dict,
999
+ training=training,
1000
+ )
1001
+ return outputs
1002
+
1003
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
1004
+ def get_text_features(
1005
+ self,
1006
+ input_ids: tf.Tensor | None = None,
1007
+ attention_mask: tf.Tensor | None = None,
1008
+ position_ids: tf.Tensor | None = None,
1009
+ return_dict: Optional[bool] = None,
1010
+ ) -> tf.Tensor:
1011
+ r"""
1012
+ Returns:
1013
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
1014
+ the projection layer to the pooled output of [`TFBlipTextModel`].
1015
+
1016
+ Examples:
1017
+
1018
+ ```python
1019
+ >>> from transformers import AutoProcessor, TFBlipModel
1020
+
1021
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
1022
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1023
+
1024
+ >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
1025
+ >>> text_features = model.get_text_features(**inputs)
1026
+ ```"""
1027
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1028
+
1029
+ text_outputs = self.blip.text_model(
1030
+ input_ids=input_ids,
1031
+ attention_mask=attention_mask,
1032
+ position_ids=position_ids,
1033
+ return_dict=return_dict,
1034
+ )
1035
+
1036
+ pooled_output = text_outputs[1]
1037
+ text_features = self.blip.text_projection(pooled_output)
1038
+
1039
+ return text_features
1040
+
1041
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1042
+ def get_image_features(
1043
+ self,
1044
+ pixel_values: tf.Tensor | None = None,
1045
+ return_dict: Optional[bool] = None,
1046
+ ) -> tf.Tensor:
1047
+ r"""
1048
+ Returns:
1049
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
1050
+ the projection layer to the pooled output of [`TFBlipVisionModel`].
1051
+
1052
+ Examples:
1053
+
1054
+ ```python
1055
+ >>> from PIL import Image
1056
+ >>> import requests
1057
+ >>> from transformers import AutoProcessor, TFBlipModel
1058
+
1059
+ >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
1060
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1061
+
1062
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1063
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1064
+
1065
+ >>> inputs = processor(images=image, return_tensors="tf")
1066
+
1067
+ >>> image_features = model.get_image_features(**inputs)
1068
+ ```"""
1069
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1070
+
1071
+ vision_outputs = self.blip.vision_model(pixel_values=pixel_values, return_dict=return_dict)
1072
+
1073
+ pooled_output = vision_outputs[1] # pooled_output
1074
+ image_features = self.blip.visual_projection(pooled_output)
1075
+
1076
+ return image_features
1077
+
1078
+ def build(self, input_shape=None):
1079
+ if self.built:
1080
+ return
1081
+ self.built = True
1082
+ if getattr(self, "blip", None) is not None:
1083
+ with tf.name_scope(self.blip.name):
1084
+ self.blip.build(None)
1085
+
1086
+
1087
+ @add_start_docstrings(
1088
+ """
1089
+ BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
1090
+ `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt and
1091
+ generate the caption as a continuation of the text input. If no text input is provided, the decoder starts
1092
+ generating from the [BOS] (beginning-of-sequence) token only.
1093
+ """,
1094
+ BLIP_START_DOCSTRING,
1095
+ )
1096
+ class TFBlipForConditionalGeneration(TFBlipPreTrainedModel):
1097
+ config_class = BlipConfig
1098
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
1099
+ main_input_name = "pixel_values"
1100
+
1101
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1102
+ super().__init__(config, *args, **kwargs)
1103
+
1104
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1105
+
1106
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
1107
+
1108
+ self.decoder_input_ids = config.text_config.bos_token_id
1109
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1110
+
1111
+ def get_input_embeddings(self) -> keras.layers.Layer:
1112
+ return self.vision_model.embeddings.patch_embedding
1113
+
1114
+ @unpack_inputs
1115
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1116
+ @replace_return_docstrings(output_type=TFBlipForConditionalGenerationModelOutput, config_class=BlipConfig)
1117
+ def call(
1118
+ self,
1119
+ pixel_values: tf.Tensor,
1120
+ input_ids: tf.Tensor | None = None,
1121
+ attention_mask: tf.Tensor | None = None,
1122
+ output_attentions: Optional[bool] = None,
1123
+ output_hidden_states: Optional[bool] = None,
1124
+ labels: tf.Tensor | None = None,
1125
+ return_dict: Optional[bool] = None,
1126
+ training: Optional[bool] = None,
1127
+ ) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]:
1128
+ r"""
1129
+ Returns:
1130
+
1131
+ Examples:
1132
+
1133
+ ```python
1134
+ >>> from PIL import Image
1135
+ >>> import requests
1136
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
1137
+
1138
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1139
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1140
+
1141
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1142
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1143
+ >>> text = "A picture of"
1144
+
1145
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1146
+
1147
+ >>> outputs = model(**inputs)
1148
+ ```"""
1149
+
1150
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1151
+ vision_outputs = self.vision_model(
1152
+ pixel_values=pixel_values,
1153
+ output_attentions=output_attentions,
1154
+ output_hidden_states=output_hidden_states,
1155
+ return_dict=return_dict,
1156
+ training=training,
1157
+ )
1158
+
1159
+ image_embeds = vision_outputs[0]
1160
+
1161
+ outputs = self.text_decoder(
1162
+ input_ids=input_ids,
1163
+ attention_mask=attention_mask,
1164
+ encoder_hidden_states=image_embeds,
1165
+ labels=labels,
1166
+ return_dict=False,
1167
+ training=training,
1168
+ )
1169
+
1170
+ if not return_dict:
1171
+ outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
1172
+ return tuple(output for output in outputs if output is not None)
1173
+
1174
+ if labels is not None:
1175
+ loss = outputs[0]
1176
+ logits = outputs[1]
1177
+ else:
1178
+ loss = None
1179
+ logits = outputs[0]
1180
+
1181
+ if loss is not None and loss.shape.rank == 0:
1182
+ loss = tf.reshape(loss, (1,))
1183
+
1184
+ return TFBlipForConditionalGenerationModelOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ image_embeds=image_embeds,
1188
+ last_hidden_state=vision_outputs.last_hidden_state,
1189
+ hidden_states=vision_outputs.hidden_states,
1190
+ attentions=vision_outputs.attentions,
1191
+ )
1192
+
1193
+ def generate(
1194
+ self,
1195
+ pixel_values: tf.Tensor,
1196
+ input_ids: tf.Tensor | None = None,
1197
+ attention_mask: tf.Tensor | None = None,
1198
+ **generate_kwargs,
1199
+ ) -> tf.Tensor:
1200
+ r"""
1201
+ Overrides the *generate* function to allow using the model as a conditional generator
1202
+
1203
+ Parameters:
1204
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
1205
+ Input image to be processed.
1206
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1207
+ The sequence used as a prompt for the generation.
1208
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1209
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
1210
+
1211
+
1212
+ Examples:
1213
+ ```python
1214
+ >>> from PIL import Image
1215
+ >>> import requests
1216
+ >>> from transformers import AutoProcessor, TFBlipForConditionalGeneration
1217
+
1218
+ >>> model = TFBlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
1219
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
1220
+
1221
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1222
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1223
+
1224
+ >>> inputs = processor(images=image, return_tensors="tf")
1225
+
1226
+ >>> outputs = model.generate(**inputs)
1227
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1228
+ two cats sleeping on a couch
1229
+ ```
1230
+ """
1231
+
1232
+ batch_size = pixel_values.shape[0]
1233
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1234
+
1235
+ image_embeds = vision_outputs[0]
1236
+
1237
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
1238
+
1239
+ if isinstance(input_ids, list):
1240
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32)
1241
+ elif input_ids is None:
1242
+ input_ids = tf.convert_to_tensor(
1243
+ [[self.decoder_input_ids, self.config.text_config.eos_token_id]], dtype=tf.int32
1244
+ )
1245
+
1246
+ input_ids = tf.tile(input_ids, (batch_size, 1))
1247
+
1248
+ # PyTorch: input_ids[:, 0] = self.config.text_config.bos_token_id
1249
+ input_ids = tf.concat(
1250
+ [tf.ones((batch_size, 1), dtype=tf.int32) * self.config.text_config.bos_token_id, input_ids[:, 1:]], axis=1
1251
+ )
1252
+ attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
1253
+
1254
+ outputs = self.text_decoder.generate(
1255
+ input_ids=input_ids[:, :-1],
1256
+ eos_token_id=self.config.text_config.sep_token_id,
1257
+ pad_token_id=self.config.text_config.pad_token_id,
1258
+ attention_mask=attention_mask,
1259
+ encoder_hidden_states=image_embeds,
1260
+ encoder_attention_mask=image_attention_mask,
1261
+ **generate_kwargs,
1262
+ )
1263
+
1264
+ return outputs
1265
+
1266
+ def build(self, input_shape=None):
1267
+ if self.built:
1268
+ return
1269
+ self.built = True
1270
+ if getattr(self, "vision_model", None) is not None:
1271
+ with tf.name_scope(self.vision_model.name):
1272
+ self.vision_model.build(None)
1273
+ if getattr(self, "text_decoder", None) is not None:
1274
+ with tf.name_scope(self.text_decoder.name):
1275
+ self.text_decoder.build(None)
1276
+
1277
+
1278
+ @add_start_docstrings(
1279
+ """
1280
+ BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
1281
+ decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
1282
+ with the encoding of the image, and the text decoder will output the answer to the question.
1283
+ """,
1284
+ BLIP_START_DOCSTRING,
1285
+ )
1286
+ class TFBlipForQuestionAnswering(TFBlipPreTrainedModel):
1287
+ config_class = BlipConfig
1288
+ _keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
1289
+
1290
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1291
+ super().__init__(config, *args, **kwargs)
1292
+
1293
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1294
+
1295
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
1296
+
1297
+ self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name="text_decoder")
1298
+
1299
+ self.decoder_pad_token_id = config.text_config.pad_token_id
1300
+ self.decoder_start_token_id = config.text_config.bos_token_id
1301
+
1302
+ def get_input_embeddings(self) -> keras.layers.Layer:
1303
+ return self.vision_model.embeddings.patch_embedding
1304
+
1305
+ # Adapted from transformers.models.t5.modeling_tf_t5.TFT5PreTrainedModel._shift_right
1306
+ def _shift_right(self, input_ids):
1307
+ decoder_start_token_id = self.decoder_start_token_id
1308
+ pad_token_id = self.decoder_pad_token_id
1309
+
1310
+ if decoder_start_token_id is None or pad_token_id is None:
1311
+ raise ValueError("decoder_start_token_id and pad_token_id must be defined!")
1312
+
1313
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
1314
+ start_tokens = tf.cast(start_tokens, input_ids.dtype) # Ensure compatible dtypes for concatenation
1315
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
1316
+
1317
+ # replace possible -100 values in labels by `pad_token_id`
1318
+ shifted_input_ids = tf.where(
1319
+ shifted_input_ids == -100,
1320
+ tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype),
1321
+ shifted_input_ids,
1322
+ )
1323
+
1324
+ # "Verify that `labels` has only positive values and -100"
1325
+ tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype))
1326
+
1327
+ return shifted_input_ids
1328
+
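+ # Worked example for `_shift_right` (illustrative token ids; the real bos/pad ids come from the
+ # text config, here assumed to be bos_token_id=30522 and pad_token_id=0):
+ #   labels:            [[ 2019, 2028, -100, -100]]
+ #   shifted_input_ids: [[30522, 2019, 2028,    0]]   # BOS prepended, last token dropped, -100 -> pad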
1329
+ @unpack_inputs
1330
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1331
+ @replace_return_docstrings(output_type=TFBlipTextVisionModelOutput, config_class=BlipVisionConfig)
1332
+ def call(
1333
+ self,
1334
+ input_ids: tf.Tensor,
1335
+ pixel_values: tf.Tensor | None = None,
1336
+ decoder_input_ids: tf.Tensor | None = None,
1337
+ decoder_attention_mask: tf.Tensor | None = None,
1338
+ attention_mask: tf.Tensor | None = None,
1339
+ output_attentions: Optional[bool] = None,
1340
+ output_hidden_states: Optional[bool] = None,
1341
+ labels: tf.Tensor | None = None,
1342
+ return_dict: Optional[bool] = None,
1343
+ training: Optional[bool] = None,
1344
+ ) -> Union[Tuple, TFBlipTextVisionModelOutput]:
1345
+ r"""
1346
+ Returns:
1347
+
1348
+ Examples:
1349
+
1350
+ ```python
1351
+ >>> from PIL import Image
1352
+ >>> import requests
1353
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
1354
+
1355
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1356
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1357
+
1358
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1359
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1360
+
1361
+ >>> # training
1362
+ >>> text = "How many cats are in the picture?"
1363
+ >>> label = "2"
1364
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1365
+ >>> labels = processor(text=label, return_tensors="tf").input_ids
1366
+
1367
+ >>> inputs["labels"] = labels
1368
+ >>> outputs = model(**inputs)
1369
+ >>> loss = outputs.loss
1370
+
1371
+ >>> # inference
1372
+ >>> text = "How many cats are in the picture?"
1373
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1374
+ >>> outputs = model.generate(**inputs)
1375
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1376
+ 2
1377
+ ```"""
1378
+ if labels is None and decoder_input_ids is None:
1379
+ raise ValueError(
1380
+ "Either `decoder_input_ids` or `labels` should be passed when calling"
1381
+ " `TFBlipForQuestionAnswering`. if you are training the model make sure that `labels` is passed, if you"
1382
+ " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`"
1383
+ )
1384
+
1385
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1386
+
1387
+ vision_outputs = self.vision_model(
1388
+ pixel_values=pixel_values,
1389
+ output_attentions=output_attentions,
1390
+ output_hidden_states=output_hidden_states,
1391
+ return_dict=return_dict,
1392
+ training=training,
1393
+ )
1394
+
1395
+ image_embeds = vision_outputs[0]
1396
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
1397
+
1398
+ question_embeds = self.text_encoder(
1399
+ input_ids=input_ids,
1400
+ attention_mask=attention_mask,
1401
+ encoder_hidden_states=image_embeds,
1402
+ encoder_attention_mask=image_attention_mask,
1403
+ return_dict=return_dict,
1404
+ training=training,
1405
+ )
1406
+
1407
+ question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
1408
+
1409
+ if labels is not None and decoder_input_ids is None:
1410
+ # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153
1411
+ decoder_input_ids = labels
1412
+
1413
+ answer_output = self.text_decoder(
1414
+ input_ids=decoder_input_ids,
1415
+ attention_mask=decoder_attention_mask,
1416
+ encoder_hidden_states=question_embeds,
1417
+ encoder_attention_mask=attention_mask,
1418
+ labels=labels,
1419
+ return_dict=return_dict,
1420
+ training=training,
1421
+ )
1422
+
1423
+ if labels is not None:
1424
+ decoder_loss = tf.reduce_mean(answer_output.loss) if return_dict else tf.reduce_mean(answer_output[0])
1425
+ else:
1426
+ decoder_loss = None
1427
+
1428
+ if not return_dict:
1429
+ outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
1430
+ return tuple(output for output in outputs if output is not None)
1431
+
1432
+ return TFBlipTextVisionModelOutput(
1433
+ loss=decoder_loss,
1434
+ image_embeds=image_embeds,
1435
+ last_hidden_state=vision_outputs.last_hidden_state,
1436
+ hidden_states=vision_outputs.hidden_states,
1437
+ attentions=vision_outputs.attentions,
1438
+ )
1439
+
1440
+ def generate(
1441
+ self,
1442
+ input_ids: tf.Tensor,
1443
+ pixel_values: tf.Tensor,
1444
+ attention_mask: tf.Tensor | None = None,
1445
+ **generate_kwargs,
1446
+ ) -> tf.Tensor:
1447
+ r"""
1448
+ Overrides the *generate* function to allow using the model as a conditional generator
1449
+
1450
+ Parameters:
1451
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
1452
+ The sequence used as a prompt for the generation.
1453
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, image_height, image_width)`):
1454
+ Input image to be processed.
1455
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1456
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
1457
+ tokens that are NOT MASKED, `0` for MASKED tokens.
1458
+ generate_kwargs (dict, *optional*):
1459
+ Additional arguments passed to the `generate` function of the decoder
1460
+
1461
+
1462
+ Examples:
1463
+ ```python
1464
+ >>> from PIL import Image
1465
+ >>> import requests
1466
+ >>> from transformers import AutoProcessor, TFBlipForQuestionAnswering
1467
+
1468
+ >>> model = TFBlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
1469
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
1470
+
1471
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1472
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1473
+ >>> text = "How many cats are in the picture?"
1474
+
1475
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1476
+
1477
+ >>> outputs = model.generate(**inputs)
1478
+ >>> print(processor.decode(outputs[0], skip_special_tokens=True))
1479
+ 2
1480
+ ```
1481
+ """
1482
+ vision_outputs = self.vision_model(pixel_values=pixel_values)
1483
+
1484
+ image_embeds = vision_outputs[0]
1485
+
1486
+ image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32)
1487
+
1488
+ if isinstance(input_ids, list):
1489
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32)
1490
+
1491
+ question_outputs = self.text_encoder(
1492
+ input_ids=input_ids,
1493
+ attention_mask=attention_mask,
1494
+ encoder_hidden_states=image_embeds,
1495
+ encoder_attention_mask=image_attention_mask,
1496
+ return_dict=False,
1497
+ )
1498
+
1499
+ question_embeds = question_outputs[0]
1500
+
1501
+ question_attention_mask = tf.ones(shape_list(question_embeds)[:-1], dtype=tf.int32)
1502
+
1503
+ bos_ids = tf.fill(
1504
+ (tf.shape(question_embeds)[0], 1), value=tf.cast(self.decoder_start_token_id, input_ids.dtype)
1505
+ )
1506
+
1507
+ outputs = self.text_decoder.generate(
1508
+ input_ids=bos_ids,
1509
+ eos_token_id=self.config.text_config.sep_token_id,
1510
+ pad_token_id=self.config.text_config.pad_token_id,
1511
+ encoder_hidden_states=question_embeds,
1512
+ encoder_attention_mask=question_attention_mask,
1513
+ **generate_kwargs,
1514
+ )
1515
+
1516
+ return outputs
1517
+
1518
+ def build(self, input_shape=None):
1519
+ if self.built:
1520
+ return
1521
+ self.built = True
1522
+ if getattr(self, "vision_model", None) is not None:
1523
+ with tf.name_scope(self.vision_model.name):
1524
+ self.vision_model.build(None)
1525
+ if getattr(self, "text_encoder", None) is not None:
1526
+ with tf.name_scope(self.text_encoder.name):
1527
+ self.text_encoder.build(None)
1528
+ if getattr(self, "text_decoder", None) is not None:
1529
+ with tf.name_scope(self.text_decoder.name):
1530
+ self.text_decoder.build(None)
1531
+
1532
+
1533
+ @add_start_docstrings(
1534
+ """
1535
+ BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
1536
+ image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
1537
+ the image.
1538
+ """,
1539
+ BLIP_START_DOCSTRING,
1540
+ )
1541
+ class TFBlipForImageTextRetrieval(TFBlipPreTrainedModel):
1542
+ config_class = BlipConfig
1543
+
1544
+ def __init__(self, config: BlipConfig, *args, **kwargs):
1545
+ super().__init__(config, *args, **kwargs)
1546
+
1547
+ self.vision_model = TFBlipVisionModel(config.vision_config, name="vision_model")
1548
+
1549
+ self.text_encoder = TFBlipTextModel(config.text_config, name="text_encoder", add_pooling_layer=False)
1550
+
1551
+ # vision projection layer
1552
+ self.vision_proj = keras.layers.Dense(
1553
+ config.image_text_hidden_size,
1554
+ kernel_initializer=get_initializer(config.initializer_range),
1555
+ name="vision_proj",
1556
+ )
1557
+
1558
+ # text projection layer
1559
+ self.text_proj = keras.layers.Dense(
1560
+ config.image_text_hidden_size,
1561
+ kernel_initializer=get_initializer(config.initializer_range),
1562
+ name="text_proj",
1563
+ )
1564
+
1565
+ # image text matching head
1566
+ self.itm_head = keras.layers.Dense(
1567
+ 2, kernel_initializer=get_initializer(config.initializer_range), name="itm_head"
1568
+ )
1569
+
1570
+ self.decoder_pad_token_id = (
1571
+ config.text_config.pad_token_id
1572
+ if not hasattr(config, "decoder_pad_token_id")
1573
+ else config.decoder_pad_token_id
1574
+ )
1575
+ self.decoder_start_token_id = (
1576
+ config.text_config.bos_token_id
1577
+ if not hasattr(config, "decoder_start_token_id")
1578
+ else config.decoder_start_token_id
1579
+ )
1580
+ self.config = config
1581
+
1582
+ def get_input_embeddings(self) -> keras.layers.Layer:
1583
+ return self.vision_model.embeddings.patch_embedding
1584
+
1585
+ @unpack_inputs
1586
+ @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
1587
+ @replace_return_docstrings(output_type=TFBlipImageTextMatchingModelOutput, config_class=BlipVisionConfig)
1588
+ def call(
1589
+ self,
1590
+ input_ids: tf.Tensor,
1591
+ pixel_values: tf.Tensor | None = None,
1592
+ use_itm_head: Optional[bool] = True,
1593
+ attention_mask: tf.Tensor | None = None,
1594
+ output_attentions: Optional[bool] = None,
1595
+ output_hidden_states: Optional[bool] = None,
1596
+ return_dict: Optional[bool] = None,
1597
+ training: Optional[bool] = None,
1598
+ ) -> Union[Tuple, TFBlipImageTextMatchingModelOutput]:
1599
+ r"""
1600
+ Returns:
1601
+
1602
+ Examples:
1603
+
1604
+ ```python
1605
+ >>> from PIL import Image
1606
+ >>> import requests
1607
+ >>> from transformers import AutoProcessor, TFBlipForImageTextRetrieval
1608
+
1609
+ >>> model = TFBlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
1610
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
1611
+
1612
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1613
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1614
+ >>> text = "an image of a cat"
1615
+
1616
+ >>> inputs = processor(images=image, text=text, return_tensors="tf")
1617
+ >>> outputs = model(**inputs)
1618
+ ```
1619
+ """
1620
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1621
+
1622
+ vision_outputs = self.vision_model(
1623
+ pixel_values=pixel_values,
1624
+ output_attentions=output_attentions,
1625
+ output_hidden_states=output_hidden_states,
1626
+ return_dict=return_dict,
1627
+ training=training,
1628
+ )
1629
+
1630
+ image_embeds = vision_outputs[0]
1631
+ image_atts = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64)
1632
+
1633
+ # Matt: In PyTorch, only one path (itm/non-itm) is taken. However, in TensorFlow this can result in
1634
+ # some layers not being built! To avoid this, we always call both paths, then use an if statement to select
1635
+ # which output to pass to the final output. The unnecessary nodes will be pruned from the final graph, but
1636
+ # not before the layers have all been built correctly.
1637
+ itm_question_embeds = self.text_encoder(
1638
+ input_ids=input_ids,
1639
+ attention_mask=attention_mask,
1640
+ encoder_hidden_states=image_embeds,
1641
+ encoder_attention_mask=image_atts,
1642
+ return_dict=return_dict,
1643
+ training=training,
1644
+ )
1645
+ itm_question_embeds = itm_question_embeds[0] if not return_dict else itm_question_embeds.last_hidden_state
1646
+
1647
+ itm_output = self.itm_head(itm_question_embeds[:, 0, :])
1648
+
1649
+ no_itm_question_embeds = self.text_encoder(
1650
+ input_ids=input_ids,
1651
+ attention_mask=attention_mask,
1652
+ return_dict=return_dict,
1653
+ training=training,
1654
+ )
1655
+ no_itm_question_embeds = (
1656
+ no_itm_question_embeds[0] if not return_dict else no_itm_question_embeds.last_hidden_state
1657
+ )
1658
+
1659
+ image_feat, _ = tf.linalg.normalize(self.vision_proj(image_embeds[:, 0, :]), ord=2, axis=-1)
1660
+ text_feat, _ = tf.linalg.normalize(self.text_proj(no_itm_question_embeds[:, 0, :]), ord=2, axis=-1)
1661
+
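+ # Without the ITM head, the score is the cosine similarity between the L2-normalized image and text projections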
1662
+ no_itm_output = tf.matmul(image_feat, text_feat, transpose_b=True)
1663
+
1664
+ if use_itm_head:
1665
+ output = itm_output
1666
+ question_embeds = itm_question_embeds
1667
+ else:
1668
+ output = no_itm_output
1669
+ question_embeds = no_itm_question_embeds
1670
+
1671
+ if not return_dict:
1672
+ outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
1673
+ return tuple(output for output in outputs if output is not None)
1674
+
1675
+ return TFBlipImageTextMatchingModelOutput(
1676
+ itm_score=output,
1677
+ last_hidden_state=vision_outputs.last_hidden_state,
1678
+ hidden_states=vision_outputs.hidden_states,
1679
+ attentions=vision_outputs.attentions,
1680
+ question_embeds=question_embeds,
1681
+ )
1682
+
1683
+ def build(self, input_shape=None):
1684
+ if self.built:
1685
+ return
1686
+ self.built = True
1687
+ if getattr(self, "vision_model", None) is not None:
1688
+ with tf.name_scope(self.vision_model.name):
1689
+ self.vision_model.build(None)
1690
+ if getattr(self, "text_encoder", None) is not None:
1691
+ with tf.name_scope(self.text_encoder.name):
1692
+ self.text_encoder.build(None)
1693
+ if getattr(self, "vision_proj", None) is not None:
1694
+ with tf.name_scope(self.vision_proj.name):
1695
+ self.vision_proj.build([None, None, self.config.vision_config.hidden_size])
1696
+ if getattr(self, "text_proj", None) is not None:
1697
+ with tf.name_scope(self.text_proj.name):
1698
+ self.text_proj.build([None, None, self.config.text_config.hidden_size])
1699
+ if getattr(self, "itm_head", None) is not None:
1700
+ with tf.name_scope(self.itm_head.name):
1701
+ self.itm_head.build([None, None, self.config.text_config.hidden_size])
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/modeling_tf_blip_text.py ADDED
@@ -0,0 +1,1122 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the BSD-3-clause license (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # https://opensource.org/licenses/BSD-3-Clause
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from __future__ import annotations
18
+
19
+ import math
20
+ from typing import Optional, Tuple
21
+
22
+ import tensorflow as tf
23
+
24
+ from ...modeling_tf_outputs import (
25
+ TFBaseModelOutputWithPastAndCrossAttentions,
26
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
27
+ TFCausalLMOutputWithCrossAttentions,
28
+ )
29
+ from ...modeling_tf_utils import (
30
+ TFModelInputType,
31
+ TFPreTrainedModel,
32
+ get_initializer,
33
+ get_tf_activation,
34
+ keras,
35
+ keras_serializable,
36
+ shape_list,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, invert_attention_mask, stable_softmax
40
+ from ...utils import add_start_docstrings_to_model_forward, logging
41
+ from .configuration_blip import BlipTextConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ BLIP_TEXT_INPUTS_DOCSTRING = r"""
47
+ Args:
48
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
49
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
50
+ it.
51
+
52
+ Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.
53
+
54
+ [What are input IDs?](../glossary#input-ids)
55
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
56
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
57
+
58
+ - 1 for tokens that are **not masked**,
59
+ - 0 for tokens that are **masked**.
60
+
61
+ [What are attention masks?](../glossary#attention-mask)
62
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
63
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
64
+ config.max_position_embeddings - 1]`.
65
+
66
+ [What are position IDs?](../glossary#position-ids)
67
+ output_attentions (`bool`, *optional*):
68
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
69
+ tensors for more detail.
70
+ output_hidden_states (`bool`, *optional*):
71
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
72
+ more detail.
73
+ return_dict (`bool`, *optional*):
74
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
75
+ """
76
+
77
+
78
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L52
79
+ class TFBlipTextEmbeddings(keras.layers.Layer):
80
+ """Construct the embeddings from word and position embeddings."""
81
+
82
+ def __init__(self, config, **kwargs):
83
+ super().__init__(**kwargs)
84
+ self.word_embeddings = keras.layers.Embedding(
85
+ config.vocab_size,
86
+ config.hidden_size,
87
+ embeddings_initializer=get_initializer(config.initializer_range),
88
+ name="word_embeddings",
89
+ )
90
+ self.position_embeddings = keras.layers.Embedding(
91
+ config.max_position_embeddings,
92
+ config.hidden_size,
93
+ embeddings_initializer=get_initializer(config.initializer_range),
94
+ name="position_embeddings",
95
+ )
96
+
97
+ # self.LayerNorm is not snake-cased to stick with PyTorch model variable name and be able to load
98
+ # any TensorFlow checkpoint file
99
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
100
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
101
+
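+ # Constant (1, max_position_embeddings) tensor of position ids, sliced to the current sequence length in call()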
102
+ self.position_ids = tf.expand_dims(tf.range(config.max_position_embeddings), 0)
103
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
104
+
105
+ self.config = config
106
+
107
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, training=None):
108
+ if input_ids is not None:
109
+ input_shape = tf.shape(input_ids)
110
+ else:
111
+ input_shape = tf.shape(inputs_embeds)[:-1]
112
+
113
+ seq_length = input_shape[1]
114
+
115
+ if position_ids is None:
116
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
117
+
118
+ if inputs_embeds is None:
119
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
120
+ inputs_embeds = self.word_embeddings(input_ids)
121
+
122
+ embeddings = inputs_embeds
123
+
124
+ if self.position_embedding_type == "absolute":
125
+ position_embeddings = self.position_embeddings(position_ids)
126
+ embeddings += position_embeddings
127
+ embeddings = self.LayerNorm(embeddings)
128
+ embeddings = self.dropout(embeddings, training=training)
129
+ return embeddings
130
+
131
+ def build(self, input_shape=None):
132
+ if self.built:
133
+ return
134
+ self.built = True
135
+ if getattr(self, "word_embeddings", None) is not None:
136
+ with tf.name_scope(self.word_embeddings.name):
137
+ self.word_embeddings.build(None)
138
+ if getattr(self, "position_embeddings", None) is not None:
139
+ with tf.name_scope(self.position_embeddings.name):
140
+ self.position_embeddings.build(None)
141
+ if getattr(self, "LayerNorm", None) is not None:
142
+ with tf.name_scope(self.LayerNorm.name):
143
+ self.LayerNorm.build([None, None, self.config.hidden_size])
144
+ if getattr(self, "dropout", None) is not None:
145
+ with tf.name_scope(self.dropout.name):
146
+ self.dropout.build(None)
147
+
148
+
149
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L97
150
+ class TFBlipTextSelfAttention(keras.layers.Layer):
151
+ def __init__(self, config, is_cross_attention, **kwargs):
152
+ super().__init__(**kwargs)
153
+ self.config = config
154
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
155
+ raise ValueError(
156
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
157
+ % (config.hidden_size, config.num_attention_heads)
158
+ )
159
+
160
+ self.num_attention_heads = config.num_attention_heads
161
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
162
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
163
+
164
+ self.query = keras.layers.Dense(
165
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
166
+ )
167
+ self.key = keras.layers.Dense(
168
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
169
+ )
170
+ self.value = keras.layers.Dense(
171
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
172
+ )
173
+
174
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
175
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
176
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
177
+ self.max_position_embeddings = config.max_position_embeddings
178
+ self.distance_embedding = keras.layers.Embedding(
179
+ 2 * config.max_position_embeddings - 1, self.attention_head_size
180
+ )
181
+ self.is_cross_attention = is_cross_attention
182
+
183
+ def transpose_for_scores(self, x):
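+ # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size) for per-head attention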
184
+ new_x_shape = tf.concat(
185
+ [tf.shape(x)[:-1], tf.constant([self.num_attention_heads, self.attention_head_size], dtype=tf.int32)],
186
+ axis=0,
187
+ )
188
+ x = tf.reshape(x, new_x_shape)
189
+ return tf.transpose(x, perm=(0, 2, 1, 3))
190
+
191
+ def call(
192
+ self,
193
+ hidden_states,
194
+ attention_mask=None,
195
+ head_mask=None,
196
+ encoder_hidden_states=None,
197
+ encoder_attention_mask=None,
198
+ past_key_value=None,
199
+ output_attentions=False,
200
+ training=None,
201
+ ):
202
+ mixed_query_layer = self.query(hidden_states)
203
+
204
+ # If this is instantiated as a cross-attention module, the keys
205
+ # and values come from an encoder; the attention mask needs to be
206
+ # such that the encoder's padding tokens are not attended to.
207
+ is_cross_attention = encoder_hidden_states is not None
208
+
209
+ if is_cross_attention:
210
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
211
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
212
+ attention_mask = encoder_attention_mask
213
+ elif past_key_value is not None:
214
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
215
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
216
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
217
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
218
+ else:
219
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
220
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
221
+
222
+ query_layer = self.transpose_for_scores(mixed_query_layer)
223
+
224
+ past_key_value = (key_layer, value_layer)
225
+
226
+ # Take the dot product between "query" and "key" to get the raw attention scores.
227
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
228
+
229
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
230
+ seq_length = shape_list(hidden_states)[1]
231
+ position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 1)
232
+ position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0)
233
+ distance = position_ids_l - position_ids_r
234
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
235
+ positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility
236
+
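+ # einsum indices: b=batch, h=head, l=query position, r=key position, d=head dimension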
237
+ if self.position_embedding_type == "relative_key":
238
+ relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
239
+ attention_scores = attention_scores + relative_position_scores
240
+ elif self.position_embedding_type == "relative_key_query":
241
+ relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
242
+ relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
243
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
244
+
245
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
246
+ if attention_mask is not None:
247
+ # Apply the attention mask is (precomputed for all layers in BlipTextModel forward() function)
248
+ attention_scores = attention_scores + tf.cast(attention_mask, attention_scores.dtype)
249
+
250
+ # Normalize the attention scores to probabilities.
251
+ attention_probs = stable_softmax(attention_scores, axis=-1)
252
+
253
+ # This is actually dropping out entire tokens to attend to, which might
254
+ # seem a bit unusual, but is taken from the original Transformer paper.
255
+ attention_probs_dropped = self.dropout(attention_probs, training=training)
256
+
257
+ # Mask heads if we want to
258
+ if head_mask is not None:
259
+ attention_probs_dropped = attention_probs_dropped * head_mask
260
+
261
+ context_layer = attention_probs_dropped @ value_layer
262
+
263
+ context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3))
264
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size]
265
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
266
+
267
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
268
+
269
+ outputs = outputs + (past_key_value,)
270
+ return outputs
271
+
272
+ def build(self, input_shape=None):
273
+ if self.built:
274
+ return
275
+ self.built = True
276
+ if getattr(self, "query", None) is not None:
277
+ with tf.name_scope(self.query.name):
278
+ self.query.build([None, None, self.config.hidden_size])
279
+ if self.is_cross_attention:
280
+ if getattr(self, "key", None) is not None:
281
+ with tf.name_scope(self.key.name):
282
+ self.key.build([None, None, self.config.encoder_hidden_size])
283
+ if getattr(self, "value", None) is not None:
284
+ with tf.name_scope(self.value.name):
285
+ self.value.build([None, None, self.config.encoder_hidden_size])
286
+ else:
287
+ if getattr(self, "key", None) is not None:
288
+ with tf.name_scope(self.key.name):
289
+ self.key.build([None, None, self.config.hidden_size])
290
+ if getattr(self, "value", None) is not None:
291
+ with tf.name_scope(self.value.name):
292
+ self.value.build([None, None, self.config.hidden_size])
293
+
294
+
295
+ class TFBlipTextSelfOutput(keras.layers.Layer):
296
+ def __init__(self, config: BlipTextConfig, **kwargs):
297
+ super().__init__(**kwargs)
298
+
299
+ self.dense = keras.layers.Dense(
300
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
301
+ )
302
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
303
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
304
+ self.config = config
305
+
306
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
307
+ hidden_states = self.dense(inputs=hidden_states)
308
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
309
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
310
+
311
+ return hidden_states
312
+
313
+ def build(self, input_shape=None):
314
+ if self.built:
315
+ return
316
+ self.built = True
317
+ if getattr(self, "dense", None) is not None:
318
+ with tf.name_scope(self.dense.name):
319
+ self.dense.build([None, None, self.config.hidden_size])
320
+ if getattr(self, "LayerNorm", None) is not None:
321
+ with tf.name_scope(self.LayerNorm.name):
322
+ self.LayerNorm.build([None, None, self.config.hidden_size])
323
+
324
+
325
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#242
326
+ class TFBlipTextAttention(keras.layers.Layer):
327
+ def __init__(self, config, is_cross_attention=False, **kwargs):
328
+ super().__init__(**kwargs)
329
+ self.self = TFBlipTextSelfAttention(config, is_cross_attention, name="self")
330
+ # "output" is a protected attribute on TF models
331
+ self.self_output = TFBlipTextSelfOutput(config, name="output")
332
+
333
+ def call(
334
+ self,
335
+ hidden_states: tf.Tensor,
336
+ attention_mask: tf.Tensor | None = None,
337
+ head_mask: tf.Tensor | None = None,
338
+ encoder_hidden_states: tf.Tensor | None = None,
339
+ encoder_attention_mask: tf.Tensor | None = None,
340
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
341
+ output_attentions: Optional[bool] = False,
342
+ training: Optional[bool] = None,
343
+ ):
344
+ self_outputs = self.self(
345
+ hidden_states,
346
+ attention_mask,
347
+ head_mask,
348
+ encoder_hidden_states,
349
+ encoder_attention_mask,
350
+ past_key_value,
351
+ output_attentions,
352
+ training=training,
353
+ )
354
+ attention_output = self.self_output(self_outputs[0], hidden_states, training=training)
355
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
356
+ return outputs
357
+
358
+ def build(self, input_shape=None):
359
+ if self.built:
360
+ return
361
+ self.built = True
362
+ if getattr(self, "self", None) is not None:
363
+ with tf.name_scope(self.self.name):
364
+ self.self.build(None)
365
+ if getattr(self, "self_output", None) is not None:
366
+ with tf.name_scope(self.self_output.name):
367
+ self.self_output.build(None)
368
+
369
+
370
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->BlipText
371
+ class TFBlipTextIntermediate(keras.layers.Layer):
372
+ def __init__(self, config: BlipTextConfig, **kwargs):
373
+ super().__init__(**kwargs)
374
+
375
+ self.dense = keras.layers.Dense(
376
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
377
+ )
378
+
379
+ if isinstance(config.hidden_act, str):
380
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
381
+ else:
382
+ self.intermediate_act_fn = config.hidden_act
383
+ self.config = config
384
+
385
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
386
+ hidden_states = self.dense(inputs=hidden_states)
387
+ hidden_states = self.intermediate_act_fn(hidden_states)
388
+
389
+ return hidden_states
390
+
391
+ def build(self, input_shape=None):
392
+ if self.built:
393
+ return
394
+ self.built = True
395
+ if getattr(self, "dense", None) is not None:
396
+ with tf.name_scope(self.dense.name):
397
+ self.dense.build([None, None, self.config.hidden_size])
398
+
399
+
400
+ class TFBlipTextOutput(keras.layers.Layer):
401
+ def __init__(self, config: BlipTextConfig, **kwargs):
402
+ super().__init__(**kwargs)
403
+
404
+ self.dense = keras.layers.Dense(
405
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
406
+ )
407
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
408
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
409
+ self.config = config
410
+
411
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
412
+ hidden_states = self.dense(inputs=hidden_states)
413
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
414
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
415
+
416
+ return hidden_states
417
+
418
+ def build(self, input_shape=None):
419
+ if self.built:
420
+ return
421
+ self.built = True
422
+ if getattr(self, "dense", None) is not None:
423
+ with tf.name_scope(self.dense.name):
424
+ self.dense.build([None, None, self.config.intermediate_size])
425
+ if getattr(self, "LayerNorm", None) is not None:
426
+ with tf.name_scope(self.LayerNorm.name):
427
+ self.LayerNorm.build([None, None, self.config.hidden_size])
428
+
429
+
430
+ class TFBlipTextLayer(keras.layers.Layer):
431
+ def __init__(self, config, **kwargs):
432
+ super().__init__(**kwargs)
433
+ self.config = config
434
+ self.attention = TFBlipTextAttention(config, name="attention")
435
+ if self.config.is_decoder:
436
+ self.crossattention = TFBlipTextAttention(
437
+ config, is_cross_attention=self.config.is_decoder, name="crossattention"
438
+ )
439
+ self.intermediate = TFBlipTextIntermediate(config, name="intermediate")
440
+ self.self_output = TFBlipTextOutput(config, name="output")
441
+
442
+ def call(
443
+ self,
444
+ hidden_states,
445
+ attention_mask=None,
446
+ head_mask=None,
447
+ encoder_hidden_states=None,
448
+ encoder_attention_mask=None,
449
+ past_key_value=None,
450
+ output_attentions=False,
451
+ training=None,
452
+ ):
453
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
454
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
455
+ self_attention_outputs = self.attention(
456
+ hidden_states,
457
+ attention_mask,
458
+ head_mask,
459
+ output_attentions=output_attentions,
460
+ past_key_value=self_attn_past_key_value,
461
+ training=training,
462
+ )
463
+ attention_output = self_attention_outputs[0]
464
+
465
+ outputs = self_attention_outputs[1:-1]
466
+ present_key_value = self_attention_outputs[-1]
467
+
468
+ if encoder_hidden_states is not None:
469
+ cross_attention_outputs = self.crossattention(
470
+ attention_output,
471
+ attention_mask,
472
+ head_mask,
473
+ encoder_hidden_states,
474
+ encoder_attention_mask,
475
+ output_attentions=output_attentions,
476
+ training=training,
477
+ )
478
+ attention_output = cross_attention_outputs[0]
479
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
480
+ intermediate_output = self.intermediate(attention_output)
481
+ layer_output = self.self_output(intermediate_output, attention_output, training=training)
482
+ outputs = (layer_output,) + outputs
483
+
484
+ outputs = outputs + (present_key_value,)
485
+
486
+ return outputs
487
+
488
+ def build(self, input_shape=None):
489
+ if self.built:
490
+ return
491
+ self.built = True
492
+ if getattr(self, "attention", None) is not None:
493
+ with tf.name_scope(self.attention.name):
494
+ self.attention.build(None)
495
+ if getattr(self, "intermediate", None) is not None:
496
+ with tf.name_scope(self.intermediate.name):
497
+ self.intermediate.build(None)
498
+ if getattr(self, "self_output", None) is not None:
499
+ with tf.name_scope(self.self_output.name):
500
+ self.self_output.build(None)
501
+ if getattr(self, "crossattention", None) is not None:
502
+ with tf.name_scope(self.crossattention.name):
503
+ self.crossattention.build(None)
504
+
505
+
506
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L386
507
+ @keras_serializable
508
+ class TFBlipTextEncoder(keras.layers.Layer):
509
+ config_class = BlipTextConfig
510
+
511
+ def __init__(self, config, name=None, **kwargs):
512
+ super().__init__(name=name, **kwargs)
513
+ self.config = config
514
+ self.layer = [TFBlipTextLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
515
+
516
+ @unpack_inputs
517
+ def call(
518
+ self,
519
+ hidden_states,
520
+ attention_mask=None,
521
+ head_mask=None,
522
+ encoder_hidden_states=None,
523
+ encoder_attention_mask=None,
524
+ past_key_values=None,
525
+ use_cache=None,
526
+ output_attentions=False,
527
+ output_hidden_states=False,
528
+ return_dict=True,
529
+ training=None,
530
+ ):
531
+ all_hidden_states = () if output_hidden_states else None
532
+ all_self_attentions = () if output_attentions else None
533
+ all_cross_attentions = () if output_attentions and self.config.is_decoder else None
534
+
535
+ next_decoder_cache = () if use_cache else None
536
+
537
+ for i in range(self.config.num_hidden_layers):
538
+ layer_module = self.layer[i]
539
+ if output_hidden_states:
540
+ all_hidden_states = all_hidden_states + (hidden_states,)
541
+
542
+ layer_head_mask = head_mask[i] if head_mask is not None else None
543
+ past_key_value = past_key_values[i] if past_key_values is not None else None
544
+
545
+ layer_outputs = layer_module(
546
+ hidden_states,
547
+ attention_mask,
548
+ layer_head_mask,
549
+ encoder_hidden_states,
550
+ encoder_attention_mask,
551
+ past_key_value,
552
+ output_attentions,
553
+ training=training,
554
+ )
555
+
556
+ hidden_states = layer_outputs[0]
557
+ if use_cache:
558
+ next_decoder_cache += (layer_outputs[-1],)
559
+ if output_attentions:
560
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
561
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
562
+
563
+ if output_hidden_states:
564
+ all_hidden_states = all_hidden_states + (hidden_states,)
565
+
566
+ if not return_dict:
567
+ return tuple(
568
+ v
569
+ for v in [
570
+ hidden_states,
571
+ next_decoder_cache,
572
+ all_hidden_states,
573
+ all_self_attentions,
574
+ all_cross_attentions,
575
+ ]
576
+ if v is not None
577
+ )
578
+ return TFBaseModelOutputWithPastAndCrossAttentions(
579
+ last_hidden_state=hidden_states,
580
+ past_key_values=next_decoder_cache,
581
+ hidden_states=all_hidden_states,
582
+ attentions=all_self_attentions,
583
+ cross_attentions=all_cross_attentions,
584
+ )
585
+
586
+ def build(self, input_shape=None):
587
+ if self.built:
588
+ return
589
+ self.built = True
590
+ if getattr(self, "layer", None) is not None:
591
+ for layer in self.layer:
592
+ with tf.name_scope(layer.name):
593
+ layer.build(None)
594
+
595
+
596
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->BlipText
597
+ class TFBlipTextPooler(keras.layers.Layer):
598
+ def __init__(self, config: BlipTextConfig, **kwargs):
599
+ super().__init__(**kwargs)
600
+
601
+ self.dense = keras.layers.Dense(
602
+ units=config.hidden_size,
603
+ kernel_initializer=get_initializer(config.initializer_range),
604
+ activation="tanh",
605
+ name="dense",
606
+ )
607
+ self.config = config
608
+
609
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
610
+ # We "pool" the model by simply taking the hidden state corresponding
611
+ # to the first token.
612
+ first_token_tensor = hidden_states[:, 0]
613
+ pooled_output = self.dense(inputs=first_token_tensor)
614
+
615
+ return pooled_output
616
+
617
+ def build(self, input_shape=None):
618
+ if self.built:
619
+ return
620
+ self.built = True
621
+ if getattr(self, "dense", None) is not None:
622
+ with tf.name_scope(self.dense.name):
623
+ self.dense.build([None, None, self.config.hidden_size])
624
+
625
+
626
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->BlipText
627
+ class TFBlipTextPredictionHeadTransform(keras.layers.Layer):
628
+ def __init__(self, config: BlipTextConfig, **kwargs):
629
+ super().__init__(**kwargs)
630
+
631
+ self.dense = keras.layers.Dense(
632
+ units=config.hidden_size,
633
+ kernel_initializer=get_initializer(config.initializer_range),
634
+ name="dense",
635
+ )
636
+
637
+ if isinstance(config.hidden_act, str):
638
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
639
+ else:
640
+ self.transform_act_fn = config.hidden_act
641
+
642
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
643
+ self.config = config
644
+
645
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
646
+ hidden_states = self.dense(inputs=hidden_states)
647
+ hidden_states = self.transform_act_fn(hidden_states)
648
+ hidden_states = self.LayerNorm(inputs=hidden_states)
649
+
650
+ return hidden_states
651
+
652
+ def build(self, input_shape=None):
653
+ if self.built:
654
+ return
655
+ self.built = True
656
+ if getattr(self, "dense", None) is not None:
657
+ with tf.name_scope(self.dense.name):
658
+ self.dense.build([None, None, self.config.hidden_size])
659
+ if getattr(self, "LayerNorm", None) is not None:
660
+ with tf.name_scope(self.LayerNorm.name):
661
+ self.LayerNorm.build([None, None, self.config.hidden_size])
662
+
663
+
664
+ class TFBlipTextLMPredictionHead(keras.layers.Layer):
665
+ def __init__(self, config, **kwargs):
666
+ super().__init__(**kwargs)
667
+ self.transform = TFBlipTextPredictionHeadTransform(config, name="transform")
668
+
669
+ # The output weights are the same as the input embeddings, but there is
670
+ # an output-only bias for each token.
671
+ self.decoder = keras.layers.Dense(
672
+ config.vocab_size,
673
+ kernel_initializer=get_initializer(config.initializer_range),
674
+ name="decoder",
675
+ use_bias=False,
676
+ )
677
+ self.config = config
678
+
679
+ def build(self, input_shape=None):
680
+ self.bias = self.add_weight(name="bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True)
681
+
682
+ if self.built:
683
+ return
684
+ self.built = True
685
+ if getattr(self, "transform", None) is not None:
686
+ with tf.name_scope(self.transform.name):
687
+ self.transform.build(None)
688
+ if getattr(self, "decoder", None) is not None:
689
+ with tf.name_scope(self.decoder.name):
690
+ self.decoder.build([None, None, self.config.hidden_size])
691
+
692
+ def call(self, hidden_states):
693
+ hidden_states = self.transform(hidden_states)
694
+ hidden_states = self.decoder(hidden_states) + self.bias
695
+ return hidden_states
696
+
697
+
698
+ class TFBlipTextOnlyMLMHead(keras.layers.Layer):
699
+ def __init__(self, config, **kwargs):
700
+ super().__init__(**kwargs)
701
+ self.predictions = TFBlipTextLMPredictionHead(config, name="predictions")
702
+
703
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
704
+ prediction_scores = self.predictions(sequence_output)
705
+ return prediction_scores
706
+
707
+ def build(self, input_shape=None):
708
+ if self.built:
709
+ return
710
+ self.built = True
711
+ if getattr(self, "predictions", None) is not None:
712
+ with tf.name_scope(self.predictions.name):
713
+ self.predictions.build(None)
714
+
715
+
716
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L548
717
+ class TFBlipTextPreTrainedModel(TFPreTrainedModel):
718
+ """
719
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
720
+ models.
721
+ """
722
+
723
+ config_class = BlipTextConfig
724
+ base_model_prefix = "bert"
725
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
726
+
727
+
728
+ # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571
729
+ class TFBlipTextModel(TFBlipTextPreTrainedModel):
730
+ """
731
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
732
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
733
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
734
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder, the model needs to be
735
+ initialized with the `is_decoder` argument of the configuration set to `True`; an `encoder_hidden_states` tensor is then expected as an input to the forward pass.
736
+ """
737
+
738
+ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs):
739
+ super().__init__(config, name=name, **kwargs)
740
+ self.config = config
741
+
742
+ self.embeddings = TFBlipTextEmbeddings(config, name="embeddings")
743
+ self.encoder = TFBlipTextEncoder(config, name="encoder")
744
+ self.pooler = TFBlipTextPooler(config, name="pooler") if add_pooling_layer else None
745
+
746
+ def get_input_embeddings(self):
747
+ return self.embeddings.word_embeddings
748
+
749
+ def set_input_embeddings(self, value):
750
+ self.embeddings.word_embeddings = value
751
+
752
+ @tf.function
753
+ def get_extended_attention_mask(
754
+ self, attention_mask: tf.Tensor, input_shape: Tuple[int], is_decoder: bool
755
+ ) -> tf.Tensor:
756
+ """
757
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
758
+
759
+ Arguments:
760
+ attention_mask (`tf.Tensor`):
761
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
762
+ input_shape (`Tuple[int]`):
763
+ The shape of the input to the model.
764
+ is_decoder (`bool`):
765
+ Whether the model is used as a decoder.
766
+
767
+ Returns:
768
+ `tf.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
769
+ """
770
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
771
+ # ourselves in which case we just need to make it broadcastable to all heads.
772
+ if not isinstance(attention_mask, tf.Tensor):
773
+ attention_mask = tf.convert_to_tensor(attention_mask) # Catches NumPy inputs that haven't been cast yet
774
+ if attention_mask.shape.rank == 3:
775
+ extended_attention_mask = attention_mask[:, None, :, :]
776
+ elif attention_mask.shape.rank == 2:
777
+ # Provided a padding mask of dimensions [batch_size, seq_length]
778
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
779
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
780
+ if is_decoder:
781
+ batch_size, seq_length = input_shape
782
+
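+ # Build a (batch, seq, seq) lower-triangular causal mask: query position i may only attend to key positions j <= i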
783
+ seq_ids = tf.range(seq_length, dtype=attention_mask.dtype)
784
+ causal_mask = tf.broadcast_to(seq_ids, (batch_size, seq_length, seq_length)) <= seq_ids[None, :, None]
785
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
786
+
787
+ if shape_list(causal_mask)[1] < shape_list(attention_mask)[1]:
788
+ prefix_seq_len = tf.shape(attention_mask)[1] - tf.shape(causal_mask)[1]
789
+ causal_mask = tf.concat(
790
+ [
791
+ tf.ones((batch_size, seq_length, prefix_seq_len), dtype=causal_mask.dtype),
792
+ causal_mask,
793
+ ],
794
+ axis=-1,
795
+ )
796
+ extended_attention_mask = (
797
+ tf.cast(causal_mask[:, None, :, :], attention_mask.dtype) * attention_mask[:, None, None, :]
798
+ )
799
+ else:
800
+ extended_attention_mask = attention_mask[:, None, None, :]
801
+ else:
802
+ raise ValueError(
803
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
804
+ input_shape, attention_mask.shape
805
+ )
806
+ )
807
+
808
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
809
+ # masked positions, this operation will create a tensor which is 0.0 for
810
+ # positions we want to attend and -10000.0 for masked positions.
811
+ # Since we are adding it to the raw scores before the softmax, this is
812
+ # effectively the same as removing these entirely.
813
+ extended_attention_mask = tf.cast(extended_attention_mask, self.dtype) # fp16 compatibility
814
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
815
+ return extended_attention_mask
816
+
817
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
818
+ @unpack_inputs
819
+ def call(
820
+ self,
821
+ input_ids: TFModelInputType | None = None,
822
+ attention_mask: tf.Tensor | None = None,
823
+ position_ids: tf.Tensor | None = None,
824
+ head_mask: tf.Tensor | None = None,
825
+ inputs_embeds: tf.Tensor | None = None,
826
+ encoder_embeds: tf.Tensor | None = None,
827
+ encoder_hidden_states: tf.Tensor | None = None,
828
+ encoder_attention_mask: tf.Tensor | None = None,
829
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
830
+ use_cache: bool | None = None,
831
+ output_attentions: bool | None = None,
832
+ output_hidden_states: bool | None = None,
833
+ return_dict: bool | None = None,
834
+ is_decoder: bool = False,
835
+ training: bool = False,
836
+ ) -> Tuple[tf.Tensor] | TFBaseModelOutputWithPoolingAndCrossAttentions:
837
+ r"""
838
+ encoder_hidden_states (`tf.Tensor`, *optional*):
839
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
840
+ the model is configured as a decoder.
841
+ encoder_attention_mask (`tf.Tensor`, *optional*):
842
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
843
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
844
+ - 1 for tokens that are **not masked**,
845
+ - 0 for tokens that are **masked**.
846
+ past_key_values (`tuple(tuple(tf.Tensor))`, *optional*):
847
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
848
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
849
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
850
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
851
+ use_cache (`bool`, *optional*):
852
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
853
+ `past_key_values`).
854
+ """
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
860
+
861
+ if is_decoder:
862
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
863
+ else:
864
+ use_cache = False
865
+
866
+ if input_ids is not None and inputs_embeds is not None:
867
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
868
+ elif input_ids is not None:
869
+ input_shape = shape_list(input_ids)
870
+ batch_size, seq_length = input_shape
871
+ elif inputs_embeds is not None:
872
+ input_shape = shape_list(inputs_embeds)[:-1]
873
+ batch_size, seq_length = input_shape
874
+ elif encoder_embeds is not None:
875
+ input_shape = shape_list(encoder_embeds)[:-1]
876
+ batch_size, seq_length = input_shape
877
+ else:
878
+ raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
879
+
880
+ # past_key_values_length
881
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
882
+
883
+ if attention_mask is None:
884
+ attention_mask = tf.ones((batch_size, seq_length + past_key_values_length))
885
+
886
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
887
+ # ourselves in which case we just need to make it broadcastable to all heads.
888
+ extended_attention_mask: tf.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, is_decoder)
889
+
890
+ # If a 2D or 3D attention mask is provided for the cross-attention
891
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
892
+ if encoder_hidden_states is not None:
893
+ if isinstance(encoder_hidden_states, list):
894
+ encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states[0])
895
+ else:
896
+ encoder_batch_size, encoder_sequence_length, _ = shape_list(encoder_hidden_states)
897
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
898
+
899
+ if isinstance(encoder_attention_mask, list):
900
+ encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask]
901
+ elif encoder_attention_mask is None:
902
+ encoder_attention_mask = tf.ones(encoder_hidden_shape)
903
+ encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask)
904
+ else:
905
+ encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask)
906
+ else:
907
+ encoder_extended_attention_mask = None
908
+
909
+ # Prepare head mask if needed
910
+ # 1.0 in head_mask indicate we keep the head
911
+ # attention_probs has shape bsz x n_heads x N x N
912
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
913
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
914
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
915
+
916
+ if encoder_embeds is None:
917
+ embedding_output = self.embeddings(
918
+ input_ids=input_ids,
919
+ position_ids=position_ids,
920
+ inputs_embeds=inputs_embeds,
921
+ past_key_values_length=past_key_values_length,
922
+ )
923
+ else:
924
+ embedding_output = encoder_embeds
925
+
926
+ encoder_outputs = self.encoder(
927
+ embedding_output,
928
+ attention_mask=extended_attention_mask,
929
+ head_mask=head_mask,
930
+ encoder_hidden_states=encoder_hidden_states,
931
+ encoder_attention_mask=encoder_extended_attention_mask,
932
+ past_key_values=past_key_values,
933
+ use_cache=use_cache,
934
+ output_attentions=output_attentions,
935
+ output_hidden_states=output_hidden_states,
936
+ return_dict=return_dict,
937
+ training=training,
938
+ )
939
+ sequence_output = encoder_outputs[0]
940
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
941
+
942
+ if not return_dict:
943
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
944
+
945
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
946
+ last_hidden_state=sequence_output,
947
+ pooler_output=pooled_output,
948
+ past_key_values=encoder_outputs.past_key_values,
949
+ hidden_states=encoder_outputs.hidden_states,
950
+ attentions=encoder_outputs.attentions,
951
+ cross_attentions=encoder_outputs.cross_attentions,
952
+ )
953
+
954
+ def build(self, input_shape=None):
955
+ if self.built:
956
+ return
957
+ self.built = True
958
+ if getattr(self, "embeddings", None) is not None:
959
+ with tf.name_scope(self.embeddings.name):
960
+ self.embeddings.build(None)
961
+ if getattr(self, "encoder", None) is not None:
962
+ with tf.name_scope(self.encoder.name):
963
+ self.encoder.build(None)
964
+ if getattr(self, "pooler", None) is not None:
965
+ with tf.name_scope(self.pooler.name):
966
+ self.pooler.build(None)
967
+
968
+
969
+ # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811
970
+ class TFBlipTextLMHeadModel(TFBlipTextPreTrainedModel):
971
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
972
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
973
+
974
+ def __init__(self, config, **kwargs):
975
+ super().__init__(config, **kwargs)
976
+
977
+ self.bert = TFBlipTextModel(config, add_pooling_layer=False, name="bert")
978
+ self.cls = TFBlipTextOnlyMLMHead(config, name="cls")
979
+ self.label_smoothing = config.label_smoothing
980
+
981
+ def get_output_embeddings(self):
982
+ return self.cls.predictions.decoder
983
+
984
+ def set_output_embeddings(self, new_embeddings):
985
+ self.cls.predictions.decoder = new_embeddings
986
+
987
+ @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
988
+ @unpack_inputs
989
+ def call(
990
+ self,
991
+ input_ids=None,
992
+ attention_mask=None,
993
+ position_ids=None,
994
+ head_mask=None,
995
+ inputs_embeds=None,
996
+ encoder_hidden_states=None,
997
+ encoder_attention_mask=None,
998
+ labels=None,
999
+ past_key_values=None,
1000
+ use_cache=None,
1001
+ output_attentions=None,
1002
+ output_hidden_states=None,
1003
+ return_dict=None,
1004
+ return_logits=False,
1005
+ is_decoder=True,
1006
+ training=None,
1007
+ ):
1008
+ r"""
1009
+ encoder_hidden_states (`tf.Tensor`, *optional*): Sequence of
1010
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is
1011
+ configured as a decoder.
1012
+ encoder_attention_mask (`tf.Tensor`, *optional*):
1013
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1014
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1015
+ - 1 for tokens that are **not masked**,
1016
+ - 0 for tokens that are **masked**.
1017
+ labels (`tf.Tensor`, *optional*):
1018
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1019
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1020
+ ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1021
+ past_key_values (`tuple(tuple(tf.Tensor))`, *optional*):
1022
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1023
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1024
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1025
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1026
+ use_cache (`bool`, *optional*):
1027
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1028
+ `past_key_values`).
1029
+ """
1030
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1031
+ if labels is not None:
1032
+ use_cache = False
1033
+
1034
+ outputs = self.bert(
1035
+ input_ids,
1036
+ attention_mask=attention_mask,
1037
+ position_ids=position_ids,
1038
+ head_mask=head_mask,
1039
+ inputs_embeds=inputs_embeds,
1040
+ encoder_hidden_states=encoder_hidden_states,
1041
+ encoder_attention_mask=encoder_attention_mask,
1042
+ past_key_values=past_key_values,
1043
+ use_cache=use_cache,
1044
+ output_attentions=output_attentions,
1045
+ output_hidden_states=output_hidden_states,
1046
+ return_dict=return_dict,
1047
+ is_decoder=is_decoder,
1048
+ training=training,
1049
+ )
1050
+
1051
+ sequence_output = outputs[0]
1052
+ prediction_scores = self.cls(sequence_output)
1053
+
1054
+ if return_logits:
1055
+ return prediction_scores[:, :-1, :]
1056
+
1057
+ lm_loss = None
1058
+ if labels is not None:
1059
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1060
+ shifted_prediction_scores = prediction_scores[:, :-1, :]
1061
+ shifted_prediction_scores = tf.reshape(shifted_prediction_scores, (-1, self.config.vocab_size))
1062
+ labels = labels[:, 1:]
1063
+ labels = tf.reshape(labels, (-1,))
1064
+ # Keras won't give us label smoothing for sparse CE, so we de-sparsify things here
1065
+ # Use relu to clamp masked labels at 0 to avoid NaN (we will be zeroing those out later anyway)
1066
+ one_hot_labels = tf.one_hot(tf.nn.relu(labels), depth=self.config.vocab_size, dtype=tf.float32)
1067
+ loss_fct = keras.losses.CategoricalCrossentropy(
1068
+ from_logits=True, label_smoothing=self.label_smoothing, reduction="none"
1069
+ )
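+ # Zero out the loss at ignored positions (label == -100) and average over the remaining real tokens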
1070
+ masked_positions = tf.cast(tf.not_equal(labels, -100), dtype=tf.float32)
1071
+ lm_loss = loss_fct(one_hot_labels, shifted_prediction_scores)
1072
+ lm_loss *= masked_positions
1073
+ lm_loss = tf.reduce_sum(lm_loss, axis=0) / tf.math.count_nonzero(masked_positions, dtype=tf.float32)
1074
+
1075
+ if not return_dict:
1076
+ output = (prediction_scores,) + outputs[2:]
1077
+ return ((lm_loss,) + output) if lm_loss is not None else output
1078
+
1079
+ return TFCausalLMOutputWithCrossAttentions(
1080
+ loss=lm_loss,
1081
+ logits=prediction_scores,
1082
+ past_key_values=outputs.past_key_values,
1083
+ hidden_states=outputs.hidden_states,
1084
+ attentions=outputs.attentions,
1085
+ cross_attentions=outputs.cross_attentions,
1086
+ )
1087
+
1088
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1089
+ input_shape = input_ids.shape
1090
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1091
+ if attention_mask is None:
1092
+ attention_mask = tf.ones(input_shape, dtype=input_ids.dtype)
1093
+
1094
+ # cut decoder_input_ids if past_key_values is used
1095
+ if past_key_values is not None:
1096
+ input_ids = input_ids[:, -1:]
1097
+
1098
+ return {
1099
+ "input_ids": input_ids,
1100
+ "attention_mask": attention_mask,
1101
+ "past_key_values": past_key_values,
1102
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
1103
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
1104
+ "is_decoder": True,
1105
+ }
1106
+
1107
+ def _reorder_cache(self, past_key_values, beam_idx):
1108
+ reordered_past = ()
1109
+ for layer_past in past_key_values:
1110
+ reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),)
1111
+ return reordered_past
1112
+
1113
+ def build(self, input_shape=None):
1114
+ if self.built:
1115
+ return
1116
+ self.built = True
1117
+ if getattr(self, "bert", None) is not None:
1118
+ with tf.name_scope(self.bert.name):
1119
+ self.bert.build(None)
1120
+ if getattr(self, "cls", None) is not None:
1121
+ with tf.name_scope(self.cls.name):
1122
+ self.cls.build(None)
llmeval-env/lib/python3.10/site-packages/transformers/models/blip/processing_blip.py ADDED
@@ -0,0 +1,150 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Blip.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...image_utils import ImageInput
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class BlipProcessor(ProcessorMixin):
28
+ r"""
29
+ Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.
30
+
31
+ [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
32
+ docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`BlipImageProcessor`):
36
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
37
+ tokenizer (`BertTokenizerFast`):
38
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "BlipImageProcessor"
43
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
44
+
45
+ def __init__(self, image_processor, tokenizer):
46
+ tokenizer.return_token_type_ids = False
47
+ super().__init__(image_processor, tokenizer)
48
+ self.current_processor = self.image_processor
49
+
50
+ def __call__(
51
+ self,
52
+ images: ImageInput = None,
53
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
54
+ add_special_tokens: bool = True,
55
+ padding: Union[bool, str, PaddingStrategy] = False,
56
+ truncation: Union[bool, str, TruncationStrategy] = None,
57
+ max_length: Optional[int] = None,
58
+ stride: int = 0,
59
+ pad_to_multiple_of: Optional[int] = None,
60
+ return_attention_mask: Optional[bool] = None,
61
+ return_overflowing_tokens: bool = False,
62
+ return_special_tokens_mask: bool = False,
63
+ return_offsets_mapping: bool = False,
64
+ return_token_type_ids: bool = False,
65
+ return_length: bool = False,
66
+ verbose: bool = True,
67
+ return_tensors: Optional[Union[str, TensorType]] = None,
68
+ **kwargs,
69
+ ) -> BatchEncoding:
70
+ """
71
+ This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
72
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
73
+
74
+ Please refer to the docstring of the above two methods for more information.
75
+ """
76
+ if images is None and text is None:
77
+ raise ValueError("You have to specify either images or text.")
78
+
79
+ # Get only text
80
+ if images is None:
81
+ self.current_processor = self.tokenizer
82
+ text_encoding = self.tokenizer(
83
+ text=text,
84
+ add_special_tokens=add_special_tokens,
85
+ padding=padding,
86
+ truncation=truncation,
87
+ max_length=max_length,
88
+ stride=stride,
89
+ pad_to_multiple_of=pad_to_multiple_of,
90
+ return_attention_mask=return_attention_mask,
91
+ return_overflowing_tokens=return_overflowing_tokens,
92
+ return_special_tokens_mask=return_special_tokens_mask,
93
+ return_offsets_mapping=return_offsets_mapping,
94
+ return_token_type_ids=return_token_type_ids,
95
+ return_length=return_length,
96
+ verbose=verbose,
97
+ return_tensors=return_tensors,
98
+ **kwargs,
99
+ )
100
+ return text_encoding
101
+
102
+ # add pixel_values
103
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
104
+
105
+ if text is not None:
106
+ text_encoding = self.tokenizer(
107
+ text=text,
108
+ add_special_tokens=add_special_tokens,
109
+ padding=padding,
110
+ truncation=truncation,
111
+ max_length=max_length,
112
+ stride=stride,
113
+ pad_to_multiple_of=pad_to_multiple_of,
114
+ return_attention_mask=return_attention_mask,
115
+ return_overflowing_tokens=return_overflowing_tokens,
116
+ return_special_tokens_mask=return_special_tokens_mask,
117
+ return_offsets_mapping=return_offsets_mapping,
118
+ return_token_type_ids=return_token_type_ids,
119
+ return_length=return_length,
120
+ verbose=verbose,
121
+ return_tensors=return_tensors,
122
+ **kwargs,
123
+ )
124
+ else:
125
+ text_encoding = None
126
+
127
+ if text_encoding is not None:
128
+ encoding_image_processor.update(text_encoding)
129
+
130
+ return encoding_image_processor
131
+
132
+ def batch_decode(self, *args, **kwargs):
133
+ """
134
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
135
+ refer to the docstring of this method for more information.
136
+ """
137
+ return self.tokenizer.batch_decode(*args, **kwargs)
138
+
139
+ def decode(self, *args, **kwargs):
140
+ """
141
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
142
+ the docstring of this method for more information.
143
+ """
144
+ return self.tokenizer.decode(*args, **kwargs)
145
+
146
+ @property
147
+ def model_input_names(self):
148
+ tokenizer_input_names = self.tokenizer.model_input_names
149
+ image_processor_input_names = self.image_processor.model_input_names
150
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
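A hedged usage sketch for the processor defined above; the checkpoint name, prompt, and image URL are illustrative and not part of this file:

```python
import requests
from PIL import Image

from transformers import BlipProcessor

# Illustrative checkpoint; any BLIP checkpoint that ships a processor config should work.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

# Images go through BlipImageProcessor, text through the BERT tokenizer,
# and the two encodings are merged into a single BatchEncoding.
inputs = processor(images=image, text="a photograph of", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']

# decode()/batch_decode() simply forward to the wrapped tokenizer.
print(processor.decode(inputs["input_ids"][0]))
```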
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
35
+
36
+ try:
37
+ if not is_tf_available():
38
+ raise OptionalDependencyNotAvailable()
39
+ except OptionalDependencyNotAvailable:
40
+ pass
41
+ else:
42
+ _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
43
+
44
+ try:
45
+ if not is_flax_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_encoder_decoder import EncoderDecoderConfig
54
+
55
+ try:
56
+ if not is_torch_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ from .modeling_encoder_decoder import EncoderDecoderModel
62
+
63
+ try:
64
+ if not is_tf_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
70
+
71
+ try:
72
+ if not is_flax_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
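The `_import_structure` / `_LazyModule` pattern above defers the heavy framework imports (PyTorch, TensorFlow, Flax) until a symbol is first requested. The following is a minimal sketch of the underlying idea, not the `_LazyModule` implementation itself:

```python
import importlib

class LazyNamespace:
    """Resolve exported names to objects in other modules only on first access."""

    def __init__(self, import_structure):
        # Invert e.g. {"math": ["sqrt"], "json": ["dumps"]} into {"sqrt": "math", ...}
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

ns = LazyNamespace({"math": ["sqrt"], "json": ["dumps"]})
print(ns.sqrt(16.0))  # "math" is imported only at this point
```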
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc ADDED
Binary file (3.94 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc ADDED
Binary file (25.8 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc ADDED
Binary file (31.5 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc ADDED
Binary file (25.1 kB)
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py ADDED
@@ -0,0 +1,106 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class EncoderDecoderConfig(PretrainedConfig):
26
+ r"""
27
+ [`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
28
+ used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
29
+ configs.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ kwargs (*optional*):
36
+ Dictionary of keyword arguments. Notably:
37
+
38
+ - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
39
+ the encoder config.
40
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
41
+ the decoder config.
42
+
43
+ Examples:
44
+
45
+ ```python
46
+ >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
47
+
48
+ >>> # Initializing a BERT google-bert/bert-base-uncased style configuration
49
+ >>> config_encoder = BertConfig()
50
+ >>> config_decoder = BertConfig()
51
+
52
+ >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
53
+
54
+ >>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
55
+ >>> model = EncoderDecoderModel(config=config)
56
+
57
+ >>> # Accessing the model configuration
58
+ >>> config_encoder = model.config.encoder
59
+ >>> config_decoder = model.config.decoder
60
+ >>> # set decoder config to causal lm
61
+ >>> config_decoder.is_decoder = True
62
+ >>> config_decoder.add_cross_attention = True
63
+
64
+ >>> # Saving the model, including its configuration
65
+ >>> model.save_pretrained("my-model")
66
+
67
+ >>> # loading model and config from pretrained folder
68
+ >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
69
+ >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
70
+ ```"""
71
+
72
+ model_type = "encoder-decoder"
73
+ is_composition = True
74
+
75
+ def __init__(self, **kwargs):
76
+ super().__init__(**kwargs)
77
+ assert (
78
+ "encoder" in kwargs and "decoder" in kwargs
79
+ ), "Config has to be initialized with encoder and decoder config"
80
+ encoder_config = kwargs.pop("encoder")
81
+ encoder_model_type = encoder_config.pop("model_type")
82
+ decoder_config = kwargs.pop("decoder")
83
+ decoder_model_type = decoder_config.pop("model_type")
84
+
85
+ from ..auto.configuration_auto import AutoConfig
86
+
87
+ self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
88
+ self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
89
+ self.is_encoder_decoder = True
90
+
91
+ @classmethod
92
+ def from_encoder_decoder_configs(
93
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
94
+ ) -> PretrainedConfig:
95
+ r"""
96
+ Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
97
+ decoder model configuration.
98
+
99
+ Returns:
100
+ [`EncoderDecoderConfig`]: An instance of a configuration object
101
+ """
102
+ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
103
+ decoder_config.is_decoder = True
104
+ decoder_config.add_cross_attention = True
105
+
106
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
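A short sketch of what `from_encoder_decoder_configs` guarantees about the composed config, complementing the docstring example above; the layer and hidden sizes are arbitrary:

```python
from transformers import BertConfig, EncoderDecoderConfig

encoder_cfg = BertConfig(hidden_size=256, num_hidden_layers=4)  # arbitrary small configs
decoder_cfg = BertConfig(hidden_size=256, num_hidden_layers=4)

config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)

# The classmethod flips the decoder flags before composing...
assert config.decoder.is_decoder and config.decoder.add_cross_attention
# ...while the encoder stays a plain encoder and the composite is marked seq2seq.
assert not config.encoder.is_decoder
assert config.is_encoder_decoder
```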
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py ADDED
@@ -0,0 +1,693 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support Encoder-Decoder architectures"""
16
+
17
+
18
+ import gc
19
+ import inspect
20
+ import os
21
+ import tempfile
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import torch
26
+ from torch import nn
27
+ from torch.nn import CrossEntropyLoss
28
+
29
+ from ...configuration_utils import PretrainedConfig
30
+ from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
33
+ from ..auto.configuration_auto import AutoConfig
34
+ from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
35
+ from .configuration_encoder_decoder import EncoderDecoderConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
41
+
42
+ DEPRECATION_WARNING = (
43
+ "Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
44
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
45
+ " fine-tuning a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the"
46
+ " labels, no need to pass them yourself anymore."
47
+ )
48
+
49
+ ENCODER_DECODER_START_DOCSTRING = r"""
50
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
51
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
52
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
53
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
54
+ generative task, like summarization.
55
+
56
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
57
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
58
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and
59
+ Aliaksei Severyn.
60
+
61
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
62
+ (see the examples for more information).
63
+
64
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
65
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
66
+ etc.)
67
+
68
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
69
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
70
+ and behavior.
71
+
72
+ Parameters:
73
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
74
+ Initializing with a config file does not load the weights associated with the model, only the
75
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
76
+ """
77
+
78
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
79
+ Args:
80
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
81
+ Indices of input sequence tokens in the vocabulary.
82
+
83
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
84
+ [`PreTrainedTokenizer.__call__`] for details.
85
+
86
+ [What are input IDs?](../glossary#input-ids)
87
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
88
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
89
+
90
+ - 1 for tokens that are **not masked**,
91
+ - 0 for tokens that are **masked**.
92
+
93
+ [What are attention masks?](../glossary#attention-mask)
94
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
95
+ Indices of decoder input sequence tokens in the vocabulary.
96
+
97
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
98
+ [`PreTrainedTokenizer.__call__`] for details.
99
+
100
+ [What are input IDs?](../glossary#input-ids)
101
+
102
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
103
+ `past_key_values`).
104
+
105
+ For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
106
+ right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
107
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
109
+ be used by default.
110
+ encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
111
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
112
+ `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
113
+ of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
114
+ decoder.
115
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
116
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
117
+
118
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
119
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
120
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
121
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
122
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
123
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
124
+ model's internal embedding lookup matrix.
125
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
126
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
127
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
128
+ into associated vectors than the model's internal embedding lookup matrix.
129
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
130
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
131
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
132
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
133
+ use_cache (`bool`, *optional*):
134
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
135
+ `past_key_values`).
136
+ output_attentions (`bool`, *optional*):
137
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
138
+ tensors for more detail.
139
+ output_hidden_states (`bool`, *optional*):
140
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
141
+ more detail.
142
+ return_dict (`bool`, *optional*):
143
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
144
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
145
+
146
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
147
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
148
+ """
149
+
150
+
151
+ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
152
+ """
153
+ Shift input ids one token to the right.
154
+ """
155
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
156
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
157
+ if decoder_start_token_id is None:
158
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
159
+ shifted_input_ids[:, 0] = decoder_start_token_id
160
+
161
+ if pad_token_id is None:
162
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
163
+ # replace possible -100 values in labels by `pad_token_id`
164
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
165
+
166
+ return shifted_input_ids
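A worked example (toy ids) of the helper above; `shift_tokens_right` is a module-level function, so it can be imported directly:

```python
import torch

from transformers.models.encoder_decoder.modeling_encoder_decoder import shift_tokens_right

labels = torch.tensor([[42, 7, 9, -100]])  # -100 marks an ignored label position

shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101)
print(shifted)  # tensor([[101,  42,   7,   9]]) -- start token prepended, labels moved right,
                # and any -100 carried into the shifted tensor is replaced by pad_token_id
```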
167
+
168
+
169
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
170
+ class EncoderDecoderModel(PreTrainedModel):
171
+ r"""
172
+ [`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
173
+ of the base model classes of the library as encoder and another one as decoder when created with the
174
+ [`~transformers.AutoModel.from_pretrained`] class method for the encoder and
175
+ [`~transformers.AutoModelForCausalLM.from_pretrained`] class method for the decoder.
176
+ """
177
+
178
+ config_class = EncoderDecoderConfig
179
+ base_model_prefix = "encoder_decoder"
180
+ main_input_name = "input_ids"
181
+ supports_gradient_checkpointing = True
182
+
183
+ def __init__(
184
+ self,
185
+ config: Optional[PretrainedConfig] = None,
186
+ encoder: Optional[PreTrainedModel] = None,
187
+ decoder: Optional[PreTrainedModel] = None,
188
+ ):
189
+ if config is None and (encoder is None or decoder is None):
190
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
191
+ if config is None:
192
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
193
+ else:
194
+ if not isinstance(config, self.config_class):
195
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
196
+
197
+ if config.decoder.cross_attention_hidden_size is not None:
198
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
199
+ raise ValueError(
200
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
201
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
202
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
203
+ " `config.encoder.hidden_size`."
204
+ )
205
+
206
+ # initialize with config
207
+ super().__init__(config)
208
+
209
+ if encoder is None:
210
+ from ..auto.modeling_auto import AutoModel
211
+
212
+ encoder = AutoModel.from_config(config.encoder)
213
+
214
+ if decoder is None:
215
+ from ..auto.modeling_auto import AutoModelForCausalLM
216
+
217
+ decoder = AutoModelForCausalLM.from_config(config.decoder)
218
+
219
+ self.encoder = encoder
220
+ self.decoder = decoder
221
+
222
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
223
+ logger.warning(
224
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
225
+ f" {self.config.encoder}"
226
+ )
227
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
228
+ logger.warning(
229
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
230
+ f" {self.config.decoder}"
231
+ )
232
+
233
+ # make sure that the individual model's config refers to the shared config
234
+ # so that the updates to the config will be synced
235
+ self.encoder.config = self.config.encoder
236
+ self.decoder.config = self.config.decoder
237
+
238
+ # encoder outputs might need to be projected to different dimension for decoder
239
+ if (
240
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
241
+ and self.decoder.config.cross_attention_hidden_size is None
242
+ ):
243
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
244
+
245
+ if self.encoder.get_output_embeddings() is not None:
246
+ raise ValueError(
247
+ f"The encoder {self.encoder} should not have an LM head. Please use a model without an LM head"
248
+ )
249
+
250
+ decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
251
+ if "encoder_hidden_states" not in decoder_signature:
252
+ raise ValueError(
253
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
254
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
255
+ )
256
+
257
+ # tie encoder, decoder weights if config set accordingly
258
+ self.tie_weights()
259
+
260
+ def tie_weights(self):
261
+ # tie encoder & decoder if needed
262
+ if self.config.tie_encoder_decoder:
263
+ # tie encoder and decoder base model
264
+ decoder_base_model_prefix = self.decoder.base_model_prefix
265
+ tied_weights = self._tie_encoder_decoder_weights(
266
+ self.encoder,
267
+ self.decoder._modules[decoder_base_model_prefix],
268
+ self.decoder.base_model_prefix,
269
+ "encoder",
270
+ )
271
+ # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class
272
+ # attribute, not an instance member; modifying it would modify the entire class,
273
+ # leading to issues on subsequent calls by different tests.
274
+ self._dynamic_tied_weights_keys = tied_weights
275
+
276
+ def get_encoder(self):
277
+ return self.encoder
278
+
279
+ def get_decoder(self):
280
+ return self.decoder
281
+
282
+ def get_input_embeddings(self):
283
+ return self.encoder.get_input_embeddings()
284
+
285
+ def get_output_embeddings(self):
286
+ return self.decoder.get_output_embeddings()
287
+
288
+ def set_output_embeddings(self, new_embeddings):
289
+ return self.decoder.set_output_embeddings(new_embeddings)
290
+
291
+ @classmethod
292
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
293
+ r"""
294
+ Example:
295
+
296
+ ```python
297
+ >>> from transformers import EncoderDecoderModel
298
+
299
+ >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
300
+ ```"""
301
+
302
+ from_tf = kwargs.pop("from_tf", False)
303
+ if from_tf:
304
+ from transformers import TFEncoderDecoderModel
305
+
306
+ # a workaround to load from tensorflow checkpoint
307
+ # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get
308
+ # extended before saving those components. For example, the name of `_tf_model.encoder.vit` is
309
+ # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. The
310
+ # [top model name] is handled (stripped) by the conversion method, and the former case gets extra `encoder`,
311
+ # which should not occur when we want to save the components alone.
312
+ # There was a (very) ugly potential fix, which wasn't integrated to `transformers`: see
313
+ # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245
314
+ # (the change in `src/transformers/modeling_tf_utils.py`)
315
+ _tf_model = TFEncoderDecoderModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
316
+ config = _tf_model.config
317
+
318
+ # Using `tf_model` instead
319
+ encoder = _tf_model.encoder.__class__(_tf_model.config.encoder)
320
+ decoder = _tf_model.decoder.__class__(_tf_model.config.decoder)
321
+ # Make sure models are built
322
+ encoder(encoder.dummy_inputs)
323
+ decoder(decoder.dummy_inputs)
324
+
325
+ # Get the variable correspondence between `_tf_model` and `encoder` and `decoder`
326
+ encoder_variables = {}
327
+ for v in encoder.trainable_variables + encoder.non_trainable_variables:
328
+ encoder_variables["/".join(v.name.split("/")[1:])] = v
329
+ decoder_variables = {}
330
+ for v in decoder.trainable_variables + decoder.non_trainable_variables:
331
+ decoder_variables["/".join(v.name.split("/")[1:])] = v
332
+
333
+ _encoder_variables = {}
334
+ for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables:
335
+ _encoder_variables["/".join(v.name.split("/")[2:])] = v
336
+ _decoder_variables = {}
337
+ for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables:
338
+ _decoder_variables["/".join(v.name.split("/")[2:])] = v
339
+
340
+ # assign weight values to `encoder` and `decoder` from `_tf_model`
341
+ for name, v in encoder_variables.items():
342
+ v.assign(_encoder_variables[name])
343
+ for name, v in decoder_variables.items():
344
+ v.assign(_decoder_variables[name])
345
+
346
+ tf_model = TFEncoderDecoderModel(encoder=encoder, decoder=decoder)
347
+
348
+ # Deal with `enc_to_dec_proj`
349
+ if hasattr(_tf_model, "enc_to_dec_proj"):
350
+ tf_model(tf_model.dummy_inputs)
351
+ tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel)
352
+ tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias)
353
+
354
+ with tempfile.TemporaryDirectory() as tmpdirname:
355
+ encoder_dir = os.path.join(tmpdirname, "encoder")
356
+ decoder_dir = os.path.join(tmpdirname, "decoder")
357
+ tf_model.encoder.save_pretrained(encoder_dir)
358
+ tf_model.decoder.save_pretrained(decoder_dir)
359
+
360
+ if hasattr(tf_model, "enc_to_dec_proj"):
361
+ enc_to_dec_proj_weight = torch.transpose(
362
+ torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0
363
+ )
364
+ enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy())
365
+
366
+ del _tf_model
367
+ del tf_model
368
+ gc.collect()
369
+
370
+ model = EncoderDecoderModel.from_encoder_decoder_pretrained(
371
+ encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True
372
+ )
373
+ # This is only for copying some specific attributes of this particular model.
374
+ model.config = config
375
+
376
+ if hasattr(model, "enc_to_dec_proj"):
377
+ model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous()
378
+ model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous()
379
+
380
+ return model
381
+
382
+ # At the moment fast initialization is not supported for composite models
383
+ if kwargs.get("_fast_init", False):
384
+ logger.warning(
385
+ "Fast initialization is currently not supported for EncoderDecoderModel. "
386
+ "Falling back to slow initialization..."
387
+ )
388
+ kwargs["_fast_init"] = False
389
+
390
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
391
+
392
+ @classmethod
393
+ def from_encoder_decoder_pretrained(
394
+ cls,
395
+ encoder_pretrained_model_name_or_path: str = None,
396
+ decoder_pretrained_model_name_or_path: str = None,
397
+ *model_args,
398
+ **kwargs,
399
+ ) -> PreTrainedModel:
400
+ r"""
401
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
402
+ checkpoints.
403
+
404
+
405
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
406
+ the model, you need to first set it back in training mode with `model.train()`.
407
+
408
+ Params:
409
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
410
+ Information necessary to initiate the encoder. Can be either:
411
+
412
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
413
+ - A path to a *directory* containing model weights saved using
414
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
415
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
416
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
417
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
418
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
419
+
420
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
421
+ Information necessary to initiate the decoder. Can be either:
422
+
423
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
424
+ - A path to a *directory* containing model weights saved using
425
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
426
+ - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
427
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
428
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
429
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
430
+
431
+ model_args (remaining positional arguments, *optional*):
432
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
433
+
434
+ kwargs (remaining dictionary of keyword arguments, *optional*):
435
+ Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
436
+ `output_attentions=True`).
437
+
438
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
439
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
440
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
441
+
442
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
443
+
444
+ Example:
445
+
446
+ ```python
447
+ >>> from transformers import EncoderDecoderModel
448
+
449
+ >>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
450
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
451
+ >>> # saving model after fine-tuning
452
+ >>> model.save_pretrained("./bert2bert")
453
+ >>> # load fine-tuned model
454
+ >>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
455
+ ```"""
456
+
457
+ kwargs_encoder = {
458
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
459
+ }
460
+
461
+ kwargs_decoder = {
462
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
463
+ }
464
+
465
+ # remove encoder, decoder kwargs from kwargs
466
+ for key in kwargs_encoder.keys():
467
+ del kwargs["encoder_" + key]
468
+ for key in kwargs_decoder.keys():
469
+ del kwargs["decoder_" + key]
470
+
471
+ # Load and initialize the encoder and decoder
472
+ # The distinction between encoder and decoder at the model level is made
473
+ # by the value of the flag `is_decoder` that we need to set correctly.
474
+ encoder = kwargs_encoder.pop("model", None)
475
+ if encoder is None:
476
+ if encoder_pretrained_model_name_or_path is None:
477
+ raise ValueError(
478
+ "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
479
+ "to be defined."
480
+ )
481
+
482
+ if "config" not in kwargs_encoder:
483
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
484
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
485
+ )
486
+
487
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
488
+ logger.info(
489
+ f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
490
+ "from a decoder model. Cross-attention and causal mask are disabled."
491
+ )
492
+ encoder_config.is_decoder = False
493
+ encoder_config.add_cross_attention = False
494
+
495
+ kwargs_encoder["config"] = encoder_config
496
+
497
+ encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
498
+
499
+ decoder = kwargs_decoder.pop("model", None)
500
+ if decoder is None:
501
+ if decoder_pretrained_model_name_or_path is None:
502
+ raise ValueError(
503
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
504
+ "to be defined."
505
+ )
506
+
507
+ if "config" not in kwargs_decoder:
508
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
509
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
510
+ )
511
+
512
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
513
+ logger.info(
514
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
515
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
516
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
517
+ )
518
+ decoder_config.is_decoder = True
519
+ decoder_config.add_cross_attention = True
520
+
521
+ kwargs_decoder["config"] = decoder_config
522
+
523
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
524
+ logger.warning(
525
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
526
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
527
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
528
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
529
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
530
+ )
531
+
532
+ decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
533
+
534
+ # instantiate config with corresponding kwargs
535
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
536
+ return cls(encoder=encoder, decoder=decoder, config=config)
537
+
538
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
539
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
540
+ def forward(
541
+ self,
542
+ input_ids: Optional[torch.LongTensor] = None,
543
+ attention_mask: Optional[torch.FloatTensor] = None,
544
+ decoder_input_ids: Optional[torch.LongTensor] = None,
545
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
546
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
547
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
548
+ inputs_embeds: Optional[torch.FloatTensor] = None,
549
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
550
+ labels: Optional[torch.LongTensor] = None,
551
+ use_cache: Optional[bool] = None,
552
+ output_attentions: Optional[bool] = None,
553
+ output_hidden_states: Optional[bool] = None,
554
+ return_dict: Optional[bool] = None,
555
+ **kwargs,
556
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
557
+ r"""
558
+ Returns:
559
+
560
+ Examples:
561
+
562
+ ```python
563
+ >>> from transformers import EncoderDecoderModel, BertTokenizer
564
+ >>> import torch
565
+
566
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
567
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
568
+ ... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
569
+ ... ) # initialize Bert2Bert from pre-trained checkpoints
570
+
571
+ >>> # training
572
+ >>> model.config.decoder_start_token_id = tokenizer.cls_token_id
573
+ >>> model.config.pad_token_id = tokenizer.pad_token_id
574
+ >>> model.config.vocab_size = model.config.decoder.vocab_size
575
+
576
+ >>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
577
+ >>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
578
+ >>> outputs = model(input_ids=input_ids, labels=labels)
579
+ >>> loss, logits = outputs.loss, outputs.logits
580
+
581
+ >>> # save and load from pretrained
582
+ >>> model.save_pretrained("bert2bert")
583
+ >>> model = EncoderDecoderModel.from_pretrained("bert2bert")
584
+
585
+ >>> # generation
586
+ >>> generated = model.generate(input_ids)
587
+ ```"""
588
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
589
+
590
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
591
+
592
+ kwargs_decoder = {
593
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
594
+ }
595
+
596
+ if encoder_outputs is None:
597
+ encoder_outputs = self.encoder(
598
+ input_ids=input_ids,
599
+ attention_mask=attention_mask,
600
+ inputs_embeds=inputs_embeds,
601
+ output_attentions=output_attentions,
602
+ output_hidden_states=output_hidden_states,
603
+ return_dict=return_dict,
604
+ **kwargs_encoder,
605
+ )
606
+ elif isinstance(encoder_outputs, tuple):
607
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
608
+
609
+ encoder_hidden_states = encoder_outputs[0]
610
+
611
+ # optionally project encoder_hidden_states
612
+ if (
613
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
614
+ and self.decoder.config.cross_attention_hidden_size is None
615
+ ):
616
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
617
+
618
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
619
+ decoder_input_ids = shift_tokens_right(
620
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
621
+ )
622
+ if decoder_attention_mask is None:
623
+ decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id)
624
+
625
+ # Decode
626
+ decoder_outputs = self.decoder(
627
+ input_ids=decoder_input_ids,
628
+ attention_mask=decoder_attention_mask,
629
+ encoder_hidden_states=encoder_hidden_states,
630
+ encoder_attention_mask=attention_mask,
631
+ inputs_embeds=decoder_inputs_embeds,
632
+ output_attentions=output_attentions,
633
+ output_hidden_states=output_hidden_states,
634
+ use_cache=use_cache,
635
+ past_key_values=past_key_values,
636
+ return_dict=return_dict,
637
+ **kwargs_decoder,
638
+ )
639
+
640
+ # Compute loss independent from decoder (as some shift the logits inside them)
641
+ loss = None
642
+ if labels is not None:
643
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
644
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
645
+ loss_fct = CrossEntropyLoss()
646
+ loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
647
+
648
+ if not return_dict:
649
+ if loss is not None:
650
+ return (loss,) + decoder_outputs + encoder_outputs
651
+ else:
652
+ return decoder_outputs + encoder_outputs
653
+
654
+ return Seq2SeqLMOutput(
655
+ loss=loss,
656
+ logits=decoder_outputs.logits,
657
+ past_key_values=decoder_outputs.past_key_values,
658
+ decoder_hidden_states=decoder_outputs.hidden_states,
659
+ decoder_attentions=decoder_outputs.attentions,
660
+ cross_attentions=decoder_outputs.cross_attentions,
661
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
662
+ encoder_hidden_states=encoder_outputs.hidden_states,
663
+ encoder_attentions=encoder_outputs.attentions,
664
+ )
665
+
666
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
667
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
668
+
669
+ def prepare_inputs_for_generation(
670
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
671
+ ):
672
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
673
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
674
+ input_dict = {
675
+ "attention_mask": attention_mask,
676
+ "decoder_attention_mask": decoder_attention_mask,
677
+ "decoder_input_ids": decoder_inputs["input_ids"],
678
+ "encoder_outputs": encoder_outputs,
679
+ "past_key_values": decoder_inputs["past_key_values"],
680
+ "use_cache": use_cache,
681
+ }
682
+ return input_dict
683
+
684
+ def resize_token_embeddings(self, *args, **kwargs):
685
+ raise NotImplementedError(
686
+ "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
687
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
688
+ " model.decoder.resize_token_embeddings(...))"
689
+ )
690
+
691
+ def _reorder_cache(self, past_key_values, beam_idx):
692
+ # apply decoder cache reordering here
693
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
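A hedged end-to-end sketch of the class above; the checkpoint is the public bert2bert summarization checkpoint already referenced in the `from_pretrained` docstring, and beam search is what exercises `_reorder_cache`:

```python
from transformers import AutoTokenizer, EncoderDecoderModel

checkpoint = "patrickvonplaten/bert2bert-cnn_dailymail-fp16"  # from the docstring above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = EncoderDecoderModel.from_pretrained(checkpoint)

article = "The Eiffel Tower is 324 metres tall and was completed in 1889."
input_ids = tokenizer(article, return_tensors="pt").input_ids

# num_beams > 1 makes generate() reorder past_key_values via _reorder_cache,
# which simply delegates to the wrapped decoder's implementation.
summary_ids = model.generate(input_ids, num_beams=4, max_new_tokens=32)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```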
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py ADDED
@@ -0,0 +1,899 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Classes to support Flax Encoder-Decoder architectures"""
16
+
17
+
18
+ import os
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import flax.linen as nn
22
+ import jax
23
+ import jax.numpy as jnp
24
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+ from jax.random import PRNGKey
28
+
29
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
30
+ from ...modeling_flax_utils import FlaxPreTrainedModel
31
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
32
+ from ..auto.configuration_auto import AutoConfig
33
+ from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
34
+ from .configuration_encoder_decoder import EncoderDecoderConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
40
+
41
+ ENCODER_DECODER_START_DOCSTRING = r"""
42
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
43
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
44
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
45
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
46
+ generative task, like summarization.
47
+
48
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
49
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
50
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and
51
+ Aliaksei Severyn.
52
+
53
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
54
+ (see the examples for more information).
55
+
56
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
57
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
58
+ etc.)
59
+
60
+ This model is also a Flax Linen
61
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
62
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
63
+
64
+ Parameters:
65
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
66
+ Initializing with a config file does not load the weights associated with the model, only the
67
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
68
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
69
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
70
+ `jax.numpy.bfloat16` (on TPUs).
71
+
72
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
73
+ specified all the computation will be performed with the given `dtype`.
74
+
75
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
76
+ parameters.**
77
+
78
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
79
+ [`~FlaxPreTrainedModel.to_bf16`].
80
+ """
81
+
82
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
83
+ Args:
84
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
85
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
86
+ it.
87
+
88
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
89
+ [`PreTrainedTokenizer.__call__`] for details.
90
+
91
+ [What are input IDs?](../glossary#input-ids)
92
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
94
+
95
+ - 1 for tokens that are **not masked**,
96
+ - 0 for tokens that are **masked**.
97
+
98
+ [What are attention masks?](../glossary#attention-mask)
99
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
100
+ Indices of decoder input sequence tokens in the vocabulary.
101
+
102
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
103
+ [`PreTrainedTokenizer.__call__`] for details.
104
+
105
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
106
+
107
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
108
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
109
+ and prepending them with the `decoder_start_token_id`.
110
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
111
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
112
+ be used by default.
113
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
114
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
115
+ config.encoder.max_position_embeddings - 1]`.
116
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
117
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
118
+ range `[0, config.decoder.max_position_embeddings - 1]`.
119
+ output_attentions (`bool`, *optional*):
120
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
121
+ tensors for more detail.
122
+ output_hidden_states (`bool`, *optional*):
123
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
124
+ more detail.
125
+ return_dict (`bool`, *optional*):
126
+ If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple.
127
+ """
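As the docstring above notes, `decoder_input_ids` must be built outside of the Flax model by shifting the `labels` to the right. A minimal sketch of that shift, mirroring the `shift_tokens_right` helper defined in the TF implementation further down; `pad_token_id` and `decoder_start_token_id` are assumed to come from the model/tokenizer configuration:

```python
import jax.numpy as jnp


def shift_tokens_right(labels: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Build `decoder_input_ids` by shifting `labels` one position to the right."""
    start_tokens = jnp.full((labels.shape[0], 1), decoder_start_token_id, dtype=labels.dtype)
    shifted = jnp.concatenate([start_tokens, labels[:, :-1]], axis=-1)
    # positions marked with -100 (ignored by the loss) are replaced by the pad token
    return jnp.where(shifted == -100, pad_token_id, shifted)
```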
128
+
129
+ ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r"""
130
+ Args:
131
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
132
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
133
+ it.
134
+
135
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
136
+ [`PreTrainedTokenizer.__call__`] for details.
137
+
138
+ [What are input IDs?](../glossary#input-ids)
139
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
140
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
141
+
142
+ - 1 for tokens that are **not masked**,
143
+ - 0 for tokens that are **masked**.
144
+
145
+ [What are attention masks?](../glossary#attention-mask)
146
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
147
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
148
+ config.encoder.max_position_embeddings - 1]`.
149
+ output_attentions (`bool`, *optional*):
150
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
151
+ tensors for more detail.
152
+ output_hidden_states (`bool`, *optional*):
153
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
154
+ more detail.
155
+ return_dict (`bool`, *optional*):
156
+ If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple.
157
+ """
158
+
159
+ ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r"""
160
+ Args:
161
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
162
+ Indices of decoder input sequence tokens in the vocabulary.
163
+
164
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
165
+ [`PreTrainedTokenizer.__call__`] for details.
166
+
167
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
168
+
169
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
170
+ `past_key_values`).
171
+
172
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
173
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
174
+ and prepending them with the `decoder_start_token_id`.
175
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
176
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
177
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
178
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
179
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
180
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
181
+
182
+ - 1 for tokens that are **not masked**,
183
+ - 0 for tokens that are **masked**.
184
+
185
+ [What are attention masks?](../glossary#attention-mask)
186
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
187
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
188
+ be used by default.
189
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
190
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
191
+ range `[0, config.decoder.max_position_embeddings - 1]`.
192
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
193
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
194
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
195
+ output_attentions (`bool`, *optional*):
196
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
197
+ tensors for more detail.
198
+ output_hidden_states (`bool`, *optional*):
199
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
200
+ more detail.
201
+ return_dict (`bool`, *optional*):
202
+ If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a
203
+ plain tuple.
204
+ """
205
+
206
+
207
+ class FlaxEncoderDecoderModule(nn.Module):
208
+ config: EncoderDecoderConfig
209
+ dtype: jnp.dtype = jnp.float32
210
+
211
+ def setup(self):
212
+ encoder_config = self.config.encoder
213
+ decoder_config = self.config.decoder
214
+
215
+ # Copied from `modeling_hybrid_clip.py` with modifications.
216
+ from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING
217
+
218
+ encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class
219
+ decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class
220
+
221
+ self.encoder = encoder_module(encoder_config, dtype=self.dtype)
222
+ self.decoder = decoder_module(decoder_config, dtype=self.dtype)
223
+
224
+ # encoder outputs might need to be projected to different dimension for decoder
225
+ if (
226
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
227
+ and self.decoder.config.cross_attention_hidden_size is None
228
+ ):
229
+ self.enc_to_dec_proj = nn.Dense(
230
+ self.decoder.config.hidden_size,
231
+ kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range),
232
+ dtype=self.dtype,
233
+ )
234
+ else:
235
+ self.enc_to_dec_proj = None
236
+
237
+ def _get_encoder_module(self):
238
+ return self.encoder
239
+
240
+ def _get_projection_module(self):
241
+ return self.enc_to_dec_proj
242
+
243
+ def _get_decoder_module(self):
244
+ return self.decoder
245
+
246
+ def __call__(
247
+ self,
248
+ input_ids,
249
+ attention_mask,
250
+ decoder_input_ids,
251
+ decoder_attention_mask,
252
+ position_ids,
253
+ decoder_position_ids,
254
+ output_attentions: bool = False,
255
+ output_hidden_states: bool = False,
256
+ return_dict: bool = True,
257
+ deterministic: bool = True,
258
+ ):
259
+ encoder_outputs = self.encoder(
260
+ input_ids=input_ids,
261
+ attention_mask=attention_mask,
262
+ position_ids=position_ids,
263
+ output_attentions=output_attentions,
264
+ output_hidden_states=output_hidden_states,
265
+ return_dict=return_dict,
266
+ deterministic=deterministic,
267
+ )
268
+
269
+ encoder_hidden_states = encoder_outputs[0]
270
+
271
+ # optionally project encoder_hidden_states
272
+ if self.enc_to_dec_proj is not None:
273
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
274
+
275
+ decoder_outputs = self.decoder(
276
+ input_ids=decoder_input_ids,
277
+ attention_mask=decoder_attention_mask,
278
+ position_ids=decoder_position_ids,
279
+ encoder_hidden_states=encoder_hidden_states,
280
+ encoder_attention_mask=attention_mask,
281
+ output_attentions=output_attentions,
282
+ output_hidden_states=output_hidden_states,
283
+ return_dict=return_dict,
284
+ deterministic=deterministic,
285
+ )
286
+
287
+ if not return_dict:
288
+ return decoder_outputs + encoder_outputs
289
+
290
+ return FlaxSeq2SeqLMOutput(
291
+ logits=decoder_outputs.logits,
292
+ decoder_hidden_states=decoder_outputs.hidden_states,
293
+ decoder_attentions=decoder_outputs.attentions,
294
+ cross_attentions=decoder_outputs.cross_attentions,
295
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
296
+ encoder_hidden_states=encoder_outputs.hidden_states,
297
+ encoder_attentions=encoder_outputs.attentions,
298
+ )
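The `enc_to_dec_proj` layer created in `setup` above only exists when the encoder and decoder hidden sizes differ and the decoder does not set `cross_attention_hidden_size`. A hedged illustration; the checkpoint pairing (768-dim BERT encoder, 1024-dim GPT2-medium decoder) is only an assumed example, and any Flax checkpoints with mismatched hidden sizes would behave the same way:

```python
from transformers import FlaxEncoderDecoderModel

# hidden_size 768 (encoder) vs. 1024 (decoder): the module adds a Dense(1024) projection
# between the encoder outputs and the decoder's cross-attention
model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google-bert/bert-base-cased", "openai-community/gpt2-medium"
)
```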
299
+
300
+
301
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
302
+ class FlaxEncoderDecoderModel(FlaxPreTrainedModel):
303
+ r"""
304
+ [`FlaxEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
305
+ the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as
306
+ decoder module when created with the [`~FlaxAutoModel.from_pretrained`] class method for the
307
+ encoder and the [`~FlaxAutoModelForCausalLM.from_pretrained`] class method for the decoder.
308
+ """
309
+
310
+ config_class = EncoderDecoderConfig
311
+ base_model_prefix = "encoder_decoder"
312
+ module_class = FlaxEncoderDecoderModule
313
+
314
+ def __init__(
315
+ self,
316
+ config: EncoderDecoderConfig,
317
+ input_shape: Optional[Tuple] = None,
318
+ seed: int = 0,
319
+ dtype: jnp.dtype = jnp.float32,
320
+ _do_init: bool = True,
321
+ **kwargs,
322
+ ):
323
+ if input_shape is None:
324
+ input_shape = ((1, 1), (1, 1))
325
+
326
+ if not _do_init:
327
+ raise ValueError(
328
+ "`FlaxEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`."
329
+ )
330
+
331
+ if config.decoder.cross_attention_hidden_size is not None:
332
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
333
+ raise ValueError(
334
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
335
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
336
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
337
+ " `config.encoder.hidden_size`."
338
+ )
339
+
340
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
341
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
342
+
343
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
344
+ encoder_input_shape, decoder_input_shape = input_shape
345
+
346
+ # init input tensors
347
+ input_ids = jnp.zeros(encoder_input_shape, dtype="i4")
348
+ attention_mask = jnp.ones_like(input_ids)
349
+ decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
350
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
351
+
352
+ batch_size, sequence_length = input_ids.shape
353
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
354
+
355
+ decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
356
+ if not decoder_batch_size == batch_size:
357
+ raise ValueError(
358
+ f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder"
359
+ f" and {decoder_batch_size} for decoder."
360
+ )
361
+ decoder_position_ids = jnp.broadcast_to(
362
+ jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
363
+ )
364
+
365
+ params_rng, dropout_rng = jax.random.split(rng)
366
+ rngs = {"params": params_rng, "dropout": dropout_rng}
367
+
368
+ random_params = self.module.init(
369
+ rngs,
370
+ input_ids,
371
+ attention_mask,
372
+ decoder_input_ids,
373
+ decoder_attention_mask,
374
+ position_ids,
375
+ decoder_position_ids,
376
+ )["params"]
377
+
378
+ if params is not None:
379
+ random_params = flatten_dict(unfreeze(random_params))
380
+ params = flatten_dict(unfreeze(params))
381
+ for missing_key in self._missing_keys:
382
+ params[missing_key] = random_params[missing_key]
383
+ self._missing_keys = set()
384
+ return freeze(unflatten_dict(params))
385
+ else:
386
+ return random_params
387
+
388
+ def init_cache(self, batch_size, max_length, encoder_outputs):
389
+ r"""
390
+ Args:
391
+ batch_size (`int`):
392
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
393
+ max_length (`int`):
394
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
395
+ cache.
396
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
397
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
398
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`
399
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
400
+ cross-attention of the decoder.
401
+ """
402
+ # init input variables to retrieve cache
403
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
404
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
405
+ decoder_position_ids = jnp.broadcast_to(
406
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
407
+ )
408
+
409
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
410
+ decoder_module = module._get_decoder_module()
411
+ return decoder_module(
412
+ input_ids=decoder_input_ids,
413
+ attention_mask=decoder_attention_mask,
414
+ position_ids=decoder_position_ids,
415
+ **kwargs,
416
+ )
417
+
418
+ init_variables = self.module.init(
419
+ jax.random.PRNGKey(0),
420
+ decoder_input_ids=decoder_input_ids,
421
+ decoder_attention_mask=decoder_attention_mask,
422
+ decoder_position_ids=decoder_position_ids,
423
+ encoder_hidden_states=encoder_outputs[0],
424
+ init_cache=True,
425
+ method=_decoder_forward, # we only need to call the decoder to init the cache
426
+ )
427
+ return unfreeze(init_variables["cache"])
428
+
429
+ @add_start_docstrings(ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING)
430
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC)
431
+ def encode(
432
+ self,
433
+ input_ids: jnp.ndarray,
434
+ attention_mask: Optional[jnp.ndarray] = None,
435
+ position_ids: Optional[jnp.ndarray] = None,
436
+ output_attentions: Optional[bool] = None,
437
+ output_hidden_states: Optional[bool] = None,
438
+ return_dict: Optional[bool] = None,
439
+ train: bool = False,
440
+ params: dict = None,
441
+ dropout_rng: PRNGKey = None,
442
+ ):
443
+ r"""
444
+ Returns:
445
+
446
+ Example:
447
+
448
+ ```python
449
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
450
+
451
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
452
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
453
+
454
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
455
+
456
+ >>> text = "My friends are cool but they eat too many carbs."
457
+ >>> input_ids = tokenizer.encode(text, return_tensors="np")
458
+ >>> encoder_outputs = model.encode(input_ids)
459
+ ```"""
460
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
461
+ output_hidden_states = (
462
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
463
+ )
464
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
465
+
466
+ if attention_mask is None:
467
+ attention_mask = jnp.ones_like(input_ids)
468
+ if position_ids is None:
469
+ batch_size, sequence_length = input_ids.shape
470
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
471
+
472
+ # Handle any PRNG if needed
473
+ rngs = {}
474
+ if dropout_rng is not None:
475
+ rngs["dropout"] = dropout_rng
476
+
477
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
478
+ encode_module = module._get_encoder_module()
479
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
480
+
481
+ outputs = self.module.apply(
482
+ {"params": params or self.params},
483
+ input_ids=jnp.array(input_ids, dtype="i4"),
484
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
485
+ position_ids=jnp.array(position_ids, dtype="i4"),
486
+ output_attentions=output_attentions,
487
+ output_hidden_states=output_hidden_states,
488
+ return_dict=return_dict,
489
+ deterministic=not train,
490
+ rngs=rngs,
491
+ method=_encoder_forward,
492
+ )
493
+
494
+ if return_dict:
495
+ outputs = FlaxBaseModelOutput(
496
+ last_hidden_state=outputs.last_hidden_state,
497
+ hidden_states=outputs.hidden_states,
498
+ attentions=outputs.attentions,
499
+ )
500
+
501
+ return outputs
502
+
503
+ @add_start_docstrings(ENCODER_DECODER_DECODE_INPUTS_DOCSTRING)
504
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
505
+ def decode(
506
+ self,
507
+ decoder_input_ids,
508
+ encoder_outputs,
509
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
510
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
511
+ decoder_position_ids: Optional[jnp.ndarray] = None,
512
+ past_key_values: dict = None,
513
+ output_attentions: Optional[bool] = None,
514
+ output_hidden_states: Optional[bool] = None,
515
+ return_dict: Optional[bool] = None,
516
+ train: bool = False,
517
+ params: dict = None,
518
+ dropout_rng: PRNGKey = None,
519
+ ):
520
+ r"""
521
+ Returns:
522
+
523
+ Example:
524
+
525
+ ```python
526
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
527
+ >>> import jax.numpy as jnp
528
+
529
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
530
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
531
+
532
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
533
+
534
+ >>> text = "My friends are cool but they eat too many carbs."
535
+ >>> input_ids = tokenizer.encode(text, max_length=1024, return_tensors="np")
536
+ >>> encoder_outputs = model.encode(input_ids)
537
+
538
+ >>> decoder_start_token_id = model.config.decoder.bos_token_id
539
+ >>> decoder_input_ids = jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
540
+
541
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
542
+ >>> logits = outputs.logits
543
+ ```"""
544
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
545
+ output_hidden_states = (
546
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
547
+ )
548
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
549
+
550
+ encoder_hidden_states = encoder_outputs[0]
551
+ if encoder_attention_mask is None:
552
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
553
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
554
+
555
+ batch_size, sequence_length = decoder_input_ids.shape
556
+ if decoder_attention_mask is None:
557
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
558
+
559
+ if decoder_position_ids is None:
560
+ if past_key_values is not None:
561
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
562
+
563
+ decoder_position_ids = jnp.broadcast_to(
564
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
565
+ )
566
+
567
+ # Handle any PRNG if needed
568
+ rngs = {}
569
+ if dropout_rng is not None:
570
+ rngs["dropout"] = dropout_rng
571
+
572
+ inputs = {"params": params or self.params}
573
+
574
+ # If past_key_values are passed, the cache is already initialized, and the private flag init_cache has to be
575
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that
576
+ # it can be changed by the decoder's attention modules
577
+ if past_key_values:
578
+ inputs["cache"] = past_key_values
579
+ mutable = ["cache"]
580
+ else:
581
+ mutable = False
582
+
583
+ def _decoder_forward(
584
+ module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs
585
+ ):
586
+ projection_module = module._get_projection_module()
587
+ decoder_module = module._get_decoder_module()
588
+
589
+ # optionally project encoder_hidden_states
590
+ if projection_module is not None:
591
+ encoder_hidden_states = projection_module(encoder_hidden_states)
592
+
593
+ return decoder_module(
594
+ decoder_input_ids,
595
+ decoder_attention_mask,
596
+ decoder_position_ids,
597
+ encoder_hidden_states=encoder_hidden_states,
598
+ **kwargs,
599
+ )
600
+
601
+ outputs = self.module.apply(
602
+ inputs,
603
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
604
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
605
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
606
+ encoder_hidden_states=encoder_hidden_states,
607
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
608
+ output_attentions=output_attentions,
609
+ output_hidden_states=output_hidden_states,
610
+ return_dict=return_dict,
611
+ deterministic=not train,
612
+ rngs=rngs,
613
+ mutable=mutable,
614
+ method=_decoder_forward,
615
+ )
616
+
617
+ # add updated cache to model output
618
+ if past_key_values is not None and return_dict:
619
+ outputs, past = outputs
620
+ outputs["past_key_values"] = unfreeze(past["cache"])
621
+ return outputs
622
+ elif past_key_values is not None and not return_dict:
623
+ outputs, past = outputs
624
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
625
+
626
+ return outputs
627
+
628
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
629
+ @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
630
+ def __call__(
631
+ self,
632
+ input_ids: jnp.ndarray,
633
+ attention_mask: Optional[jnp.ndarray] = None,
634
+ decoder_input_ids: Optional[jnp.ndarray] = None,
635
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
636
+ position_ids: Optional[jnp.ndarray] = None,
637
+ decoder_position_ids: Optional[jnp.ndarray] = None,
638
+ output_attentions: Optional[bool] = None,
639
+ output_hidden_states: Optional[bool] = None,
640
+ return_dict: Optional[bool] = None,
641
+ train: bool = False,
642
+ params: dict = None,
643
+ dropout_rng: PRNGKey = None,
644
+ ):
645
+ r"""
646
+ Returns:
647
+
648
+ Examples:
649
+
650
+ ```python
651
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer
652
+
653
+ >>> # load a fine-tuned bert2gpt2 model
654
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16")
655
+ >>> # load input & output tokenizer
656
+ >>> tokenizer_input = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
657
+ >>> tokenizer_output = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
658
+
659
+ >>> article = '''Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members
660
+ ... singing a racist chant. SAE's national chapter suspended the students,
661
+ ... but University of Oklahoma President David Boren took it a step further,
662
+ ... saying the university's affiliation with the fraternity is permanently done.'''
663
+
664
+ >>> input_ids = tokenizer_input(article, add_special_tokens=True, return_tensors="np").input_ids
665
+
666
+ >>> # use GPT2's eos_token as the pad as well as eos token
667
+ >>> model.config.eos_token_id = model.config.decoder.eos_token_id
668
+ >>> model.config.pad_token_id = model.config.eos_token_id
669
+
670
+ >>> sequences = model.generate(input_ids, num_beams=4, max_length=12).sequences
671
+
672
+ >>> summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)[0]
673
+ >>> assert summary == "SAS Alpha Epsilon suspended Sigma Alpha Epsilon members"
674
+ ```
675
+ """
676
+
677
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
678
+ output_hidden_states = (
679
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
680
+ )
681
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
682
+
683
+ # prepare encoder inputs
684
+ if attention_mask is None:
685
+ attention_mask = jnp.ones_like(input_ids)
686
+ if position_ids is None:
687
+ batch_size, sequence_length = input_ids.shape
688
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
689
+
690
+ # prepare decoder inputs
691
+ if decoder_input_ids is None:
692
+ raise ValueError(
693
+ "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_input_ids` must"
694
+ " be specified as an input argument."
695
+ )
696
+ if decoder_attention_mask is None:
697
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
698
+ if decoder_position_ids is None:
699
+ batch_size, sequence_length = decoder_input_ids.shape
700
+ decoder_position_ids = jnp.broadcast_to(
701
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
702
+ )
703
+
704
+ # Handle any PRNG if needed
705
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
706
+
707
+ return self.module.apply(
708
+ {"params": params or self.params},
709
+ input_ids=jnp.array(input_ids, dtype="i4"),
710
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
711
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
712
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
713
+ position_ids=jnp.array(position_ids, dtype="i4"),
714
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
715
+ output_attentions=output_attentions,
716
+ output_hidden_states=output_hidden_states,
717
+ return_dict=return_dict,
718
+ deterministic=not train,
719
+ rngs=rngs,
720
+ )
721
+
722
+ def prepare_inputs_for_generation(
723
+ self,
724
+ decoder_input_ids,
725
+ max_length,
726
+ attention_mask: Optional[jax.Array] = None,
727
+ decoder_attention_mask: Optional[jax.Array] = None,
728
+ encoder_outputs=None,
729
+ **kwargs,
730
+ ):
731
+ # initializing the cache
732
+ batch_size, seq_length = decoder_input_ids.shape
733
+
734
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
735
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
736
+ # But since the decoder uses a causal mask, those positions are masked anyways.
737
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
738
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
739
+ if decoder_attention_mask is not None:
740
+ decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
741
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
742
+ else:
743
+ decoder_position_ids = jnp.broadcast_to(
744
+ jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
745
+ )
746
+
747
+ return {
748
+ "past_key_values": past_key_values,
749
+ "encoder_outputs": encoder_outputs,
750
+ "encoder_attention_mask": attention_mask,
751
+ "decoder_attention_mask": extended_attention_mask,
752
+ "decoder_position_ids": decoder_position_ids,
753
+ }
754
+
755
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
756
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
757
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
758
+ return model_kwargs
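`init_cache`, `prepare_inputs_for_generation` and `update_inputs_for_generation` are the hooks that `generate` relies on for cached auto-regressive decoding. A rough sketch of a single manual step under the same assumptions as the docstring examples above (`model` is a bert2gpt2 `FlaxEncoderDecoderModel` and `input_ids` are already tokenized); `max_length=20` and the greedy `argmax` are arbitrary illustrative choices:

```python
import jax.numpy as jnp

encoder_outputs = model.encode(input_ids)
decoder_start_token_id = model.config.decoder.bos_token_id
decoder_input_ids = jnp.full((input_ids.shape[0], 1), decoder_start_token_id, dtype="i4")

# pre-allocate the decoder cache and build the static attention mask / position ids
step_inputs = model.prepare_inputs_for_generation(
    decoder_input_ids,
    max_length=20,
    attention_mask=jnp.ones_like(input_ids),
    encoder_outputs=encoder_outputs,
)

outputs = model.decode(decoder_input_ids, **step_inputs)
next_token_id = outputs.logits[:, -1].argmax(-1)  # greedy pick for the next position
```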
759
+
760
+ @classmethod
761
+ def from_encoder_decoder_pretrained(
762
+ cls,
763
+ encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
764
+ decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
765
+ *model_args,
766
+ **kwargs,
767
+ ) -> FlaxPreTrainedModel:
768
+ r"""
769
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
770
+ checkpoints.
771
+
772
+ Params:
773
+ encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*):
774
+ Information necessary to initiate the encoder. Can be either:
775
+
776
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
777
+ - A path to a *directory* containing model weights saved using
778
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
779
+
780
+ decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`):
781
+ Information necessary to initiate the decoder. Can be either:
782
+
783
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
784
+ - A path to a *directory* containing model weights saved using
785
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
786
+
787
+ model_args (remaining positional arguments, *optional*):
788
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
789
+
790
+ kwargs (remaining dictionary of keyword arguments, *optional*):
791
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
792
+ `output_attentions=True`).
793
+
794
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
795
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
796
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
797
+
798
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
799
+
800
+ Example:
801
+
802
+ ```python
803
+ >>> from transformers import FlaxEncoderDecoderModel
804
+
805
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
806
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
807
+ >>> # saving model after fine-tuning
808
+ >>> model.save_pretrained("./bert2gpt2")
809
+ >>> # load fine-tuned model
810
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("./bert2gpt2")
811
+ ```"""
812
+
813
+ kwargs_encoder = {
814
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
815
+ }
816
+
817
+ kwargs_decoder = {
818
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
819
+ }
820
+
821
+ # remove encoder, decoder kwargs from kwargs
822
+ for key in kwargs_encoder.keys():
823
+ del kwargs["encoder_" + key]
824
+ for key in kwargs_decoder.keys():
825
+ del kwargs["decoder_" + key]
826
+
827
+ # Load and initialize the encoder and decoder
828
+ # The distinction between encoder and decoder at the model level is made
829
+ # by the value of the flag `is_decoder` that we need to set correctly.
830
+ encoder = kwargs_encoder.pop("model", None)
831
+ if encoder is None:
832
+ if encoder_pretrained_model_name_or_path is None:
833
+ raise ValueError(
834
+ "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
835
+ "to be defined."
836
+ )
837
+
838
+ if "config" not in kwargs_encoder:
839
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
840
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
841
+ )
842
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
843
+ logger.info(
844
+ f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
845
+ "from a decoder model. Cross-attention and causal mask are disabled."
846
+ )
847
+ encoder_config.is_decoder = False
848
+ encoder_config.add_cross_attention = False
849
+
850
+ kwargs_encoder["config"] = encoder_config
851
+
852
+ encoder = FlaxAutoModel.from_pretrained(
853
+ encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
854
+ )
855
+
856
+ decoder = kwargs_decoder.pop("model", None)
857
+ if decoder is None:
858
+ if decoder_pretrained_model_name_or_path is None:
859
+ raise ValueError(
860
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
861
+ "to be defined."
862
+ )
863
+
864
+ if "config" not in kwargs_decoder:
865
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
866
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
867
+ )
868
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
869
+ logger.info(
870
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
871
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
872
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
873
+ )
874
+ decoder_config.is_decoder = True
875
+ decoder_config.add_cross_attention = True
876
+
877
+ kwargs_decoder["config"] = decoder_config
878
+
879
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
880
+ logger.warning(
881
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
882
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
883
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
884
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
885
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
886
+ )
887
+
888
+ decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
889
+
890
+ # instantiate config with corresponding kwargs
891
+ dtype = kwargs.pop("dtype", jnp.float32)
892
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
893
+
894
+ # init model
895
+ model = cls(config, dtype=dtype)
896
+ model.params["encoder"] = encoder.params
897
+ model.params["decoder"] = decoder.params
898
+
899
+ return model
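A short sketch of the `encoder_`/`decoder_` kwargs-prefix convention described in the docstring above; the dropout field names are assumed BERT and GPT-2 config attributes used purely for illustration:

```python
from transformers import FlaxEncoderDecoderModel

model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google-bert/bert-base-cased",
    "openai-community/gpt2",
    encoder_hidden_dropout_prob=0.2,  # "encoder_" prefix stripped, applied to the BERT config
    decoder_attn_pdrop=0.2,  # "decoder_" prefix stripped, applied to the GPT-2 config
)
```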
llmeval-env/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py ADDED
@@ -0,0 +1,663 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Classes to support TF Encoder-Decoder architectures"""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import inspect
21
+ import re
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...configuration_utils import PretrainedConfig
29
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
30
+ from ...modeling_tf_utils import (
31
+ TFCausalLanguageModelingLoss,
32
+ TFModelInputType,
33
+ TFPreTrainedModel,
34
+ get_initializer,
35
+ keras,
36
+ unpack_inputs,
37
+ )
38
+ from ...tf_utils import shape_list
39
+ from ...utils import (
40
+ ModelOutput,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from ..auto.configuration_auto import AutoConfig
47
+ from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
48
+ from .configuration_encoder_decoder import EncoderDecoderConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CONFIG_FOR_DOC = "EncoderDecoderConfig"
54
+
55
+ DEPRECATION_WARNING = (
56
+ "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
57
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
58
+ " fine-tuning a model trained with versions prior to 4.17.0. The decoder_input_ids are now created based on the"
59
+ " labels, no need to pass them yourself anymore."
60
+ )
61
+
62
+ ENCODER_DECODER_START_DOCSTRING = r"""
63
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
64
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
65
+ [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
66
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
67
+ generative task, like summarization.
68
+
69
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
70
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
71
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
72
+ Zhou, Wei Li, Peter J. Liu.
73
+
74
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
75
+ (see the examples for more information).
76
+
77
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
78
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
79
+ etc.)
80
+
81
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
82
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
83
+ behavior.
84
+
85
+ Parameters:
86
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
87
+ Initializing with a config file does not load the weights associated with the model, only the
88
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
89
+ """
90
+
91
+ ENCODER_DECODER_INPUTS_DOCSTRING = r"""
92
+ Args:
93
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
94
+ Indices of input sequence tokens in the vocabulary.
95
+
96
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
97
+ [`PreTrainedTokenizer.__call__`] for details.
98
+
99
+ [What are input IDs?](../glossary#input-ids)
100
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
101
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
102
+
103
+ - 1 for tokens that are **not masked**,
104
+ - 0 for tokens that are **masked**.
105
+
106
+ [What are attention masks?](../glossary#attention-mask)
107
+ decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
108
+ Indices of decoder input sequence tokens in the vocabulary.
109
+
110
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
111
+ [`PreTrainedTokenizer.__call__`] for details.
112
+
113
+ [What are input IDs?](../glossary#input-ids)
114
+
115
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
116
+ `past_key_values`).
117
+
118
+ Provide for sequence to sequence training to the decoder. Indices can be obtained using
119
+ [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
120
+ details.
121
+ decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
122
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
123
+ be used by default.
124
+ encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
125
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
126
+ `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
127
+ of the last layer of the encoder. Used in the cross-attention of the decoder.
128
+ past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
129
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
130
+
131
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
132
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
133
+ `decoder_input_ids` of shape `({0})`.
134
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
135
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
136
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
137
+ model's internal embedding lookup matrix.
138
+ decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
139
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
140
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
141
+ into associated vectors than the model's internal embedding lookup matrix.
142
+ labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
143
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
144
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
145
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
146
+ use_cache (`bool`, *optional*):
147
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
148
+ `past_key_values`).
149
+ output_attentions (`bool`, *optional*):
150
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
151
+ tensors for more detail.
152
+ output_hidden_states (`bool`, *optional*):
153
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
154
+ more detail.
155
+ return_dict (`bool`, *optional*):
156
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
157
+ training (`bool`, *optional*, defaults to `False`):
158
+ Whether or not to use the model in training mode (some modules like dropout modules have different
159
+ behaviors between training and evaluation).
160
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
161
+
162
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
163
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
164
+ """
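Because the loss is computed inside the encoder-decoder framework (see the `DEPRECATION_WARNING` above), passing `labels` alone is enough for training: the model derives `decoder_input_ids` from them with `shift_tokens_right` below. A minimal sketch, assuming a bert2bert pairing so that a single tokenizer serves both sides:

```python
from transformers import BertTokenizer, TFEncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
)

# required for the labels -> decoder_input_ids shift
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("A long article to summarize", return_tensors="tf")
labels = tokenizer("A short summary", return_tensors="tf").input_ids

outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels)
loss = outputs.loss
```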
165
+
166
+
167
+ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
168
+ if pad_token_id is None:
169
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
170
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
171
+
172
+ if decoder_start_token_id is None:
173
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
174
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
175
+
176
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
177
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
178
+ # replace possible -100 values in labels by `pad_token_id`
179
+ shifted_input_ids = tf.where(
180
+ shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
181
+ )
182
+
183
+ # "Verify that `labels` has only positive values and -100"
184
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
185
+
186
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
187
+ with tf.control_dependencies([assert_gte0]):
188
+ shifted_input_ids = tf.identity(shifted_input_ids)
189
+
190
+ return shifted_input_ids
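A quick worked example of the helper above (values are illustrative): the start token is prepended, the last label is dropped, and any `-100` that ends up in the shifted row would be replaced by the pad id.

```python
import tensorflow as tf

labels = tf.constant([[5, 6, 7, -100]])  # -100 marks positions ignored by the loss
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=101)
# -> [[101, 5, 6, 7]]
```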
191
+
192
+
193
+ @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
194
+ class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
195
+ r"""
196
+ [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
197
+ of the base model classes of the library as encoder and another one as decoder when created with the
198
+ [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
199
+ method for the decoder.
200
+ """
201
+
202
+ config_class = EncoderDecoderConfig
203
+ base_model_prefix = "encoder_decoder"
204
+ load_weight_prefix = "tf_encoder_decoder_model"
205
+
206
+ def __init__(
207
+ self,
208
+ config: Optional[PretrainedConfig] = None,
209
+ encoder: Optional[TFPreTrainedModel] = None,
210
+ decoder: Optional[TFPreTrainedModel] = None,
211
+ ):
212
+ if config is None and (encoder is None or decoder is None):
213
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
214
+ if config is None:
215
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
216
+ else:
217
+ if not isinstance(config, self.config_class):
218
+ raise ValueError(f"config: {config} has to be of type {self.config_class}")
219
+
220
+ if config.decoder.cross_attention_hidden_size is not None:
221
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
222
+ raise ValueError(
223
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
224
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
225
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
226
+ " `config.encoder.hidden_size`."
227
+ )
228
+
229
+ # initialize with config
230
+ super().__init__(config)
231
+
232
+ if encoder is None:
233
+ encoder = TFAutoModel.from_config(config.encoder, name="encoder")
234
+
235
+ if decoder is None:
236
+ decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
237
+
238
+ self.encoder = encoder
239
+ self.decoder = decoder
240
+
241
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
242
+ logger.warning(
243
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
244
+ f" {self.config.encoder}"
245
+ )
246
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
247
+ logger.warning(
248
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
249
+ f" {self.config.decoder}"
250
+ )
251
+
252
+ # make sure that the individual model's config refers to the shared config
253
+ # so that the updates to the config will be synced
254
+ self.encoder.config = self.config.encoder
255
+ self.decoder.config = self.config.decoder
256
+
257
+ # encoder outputs might need to be projected to different dimension for decoder
258
+ if (
259
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
260
+ and self.decoder.config.cross_attention_hidden_size is None
261
+ ):
262
+ self.enc_to_dec_proj = keras.layers.Dense(
263
+ units=self.decoder.config.hidden_size,
264
+ kernel_initializer=get_initializer(config.encoder.initializer_range),
265
+ name="enc_to_dec_proj",
266
+ )
267
+
268
+ if self.encoder.get_output_embeddings() is not None:
269
+ raise ValueError(
270
+ f"The encoder {self.encoder} should not have an LM head. Please use a model without an LM head"
271
+ )
272
+
273
+ decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys())
274
+ if "encoder_hidden_states" not in decoder_signature:
275
+ raise ValueError(
276
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
277
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
278
+ )
279
+
280
+ def get_encoder(self):
281
+ return self.encoder
282
+
283
+ def get_decoder(self):
284
+ return self.decoder
285
+
286
+ def get_input_embeddings(self):
287
+ return self.encoder.get_input_embeddings()
288
+
289
+ def get_output_embeddings(self):
290
+ return self.decoder.get_output_embeddings()
291
+
292
+ def set_output_embeddings(self, new_embeddings):
293
+ return self.decoder.set_output_embeddings(new_embeddings)
294
+
295
+ def tf_to_pt_weight_rename(self, tf_weight):
296
+ # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models
297
+ # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal.
298
+ # However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption
299
+ # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's
300
+ # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name!
301
+
302
+ # This override is only needed in the case where we're crossloading weights from PT. However, since weights are
303
+ # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file.
304
+ # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it
305
+ # or not.
306
+ encoder_model_type = self.config.encoder.model_type
307
+ if "encoder" in tf_weight and "decoder" not in tf_weight:
308
+ return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),)
309
+ else:
310
+ return (tf_weight,)
311
+
312
+ @classmethod
313
+ def from_encoder_decoder_pretrained(
314
+ cls,
315
+ encoder_pretrained_model_name_or_path: str = None,
316
+ decoder_pretrained_model_name_or_path: str = None,
317
+ *model_args,
318
+ **kwargs,
319
+ ) -> TFPreTrainedModel:
320
+ r"""
321
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
322
+ checkpoints.
323
+
324
+
325
+ Params:
326
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
327
+ Information necessary to initiate the encoder. Can be either:
328
+
329
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
330
+ - A path to a *directory* containing model weights saved using
331
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
332
+ - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
333
+ `encoder_from_pt` should be set to `True`.
334
+
335
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
336
+ Information necessary to initiate the decoder. Can be either:
337
+
338
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
339
+ - A path to a *directory* containing model weights saved using
340
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
341
+ - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
342
+ `decoder_from_pt` should be set to `True`.
343
+
344
+ model_args (remaining positional arguments, *optional*):
345
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
346
+
347
+ kwargs (remaining dictionary of keyword arguments, *optional*):
348
+ Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
349
+ `output_attentions=True`).
350
+
351
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
352
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
353
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
354
+
355
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
356
+
357
+ Example:
358
+
359
+ ```python
360
+ >>> from transformers import TFEncoderDecoderModel
361
+
362
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
363
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "openai-community/gpt2")
364
+ >>> # saving model after fine-tuning
365
+ >>> model.save_pretrained("./bert2gpt2")
366
+ >>> # load fine-tuned model
367
+ >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
368
+ ```"""
369
+
370
+ kwargs_encoder = {
371
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
372
+ }
373
+
374
+ kwargs_decoder = {
375
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
376
+ }
377
+
378
+ # remove encoder, decoder kwargs from kwargs
379
+ for key in kwargs_encoder.keys():
380
+ del kwargs["encoder_" + key]
381
+ for key in kwargs_decoder.keys():
382
+ del kwargs["decoder_" + key]
383
+
384
+ # Load and initialize the encoder and decoder
385
+ # The distinction between encoder and decoder at the model level is made
386
+ # by the value of the flag `is_decoder` that we need to set correctly.
387
+ encoder = kwargs_encoder.pop("model", None)
388
+ if encoder is None:
389
+ if encoder_pretrained_model_name_or_path is None:
390
+ raise ValueError(
391
+ "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
392
+ "to be defined."
393
+ )
394
+
395
+ if "config" not in kwargs_encoder:
396
+ encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
397
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
398
+ logger.info(
399
+ f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
400
+ "from a decoder model. Cross-attention and causal mask are disabled."
401
+ )
402
+ encoder_config.is_decoder = False
403
+ encoder_config.add_cross_attention = False
404
+
405
+ kwargs_encoder["config"] = encoder_config
406
+
407
+ kwargs_encoder["name"] = "encoder"
408
+ kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
409
+ encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
410
+
411
+ decoder = kwargs_decoder.pop("model", None)
412
+ if decoder is None:
413
+ if decoder_pretrained_model_name_or_path is None:
414
+ raise ValueError(
415
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
416
+ "to be defined."
417
+ )
418
+
419
+ if "config" not in kwargs_decoder:
420
+ decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
421
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
422
+ logger.info(
423
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
424
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
425
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
426
+ )
427
+ decoder_config.is_decoder = True
428
+ decoder_config.add_cross_attention = True
429
+
430
+ kwargs_decoder["config"] = decoder_config
431
+
432
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
433
+ logger.warning(
434
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
435
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
436
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
437
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
438
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
439
+ )
440
+
441
+ kwargs_decoder["name"] = "decoder"
442
+ kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
443
+ decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
444
+
445
+ # Make sure these two `keras.Model`s have fixed names so that `from_pretrained` can load model weights correctly.
446
+ if encoder.name != "encoder":
447
+ raise ValueError("encoder model must be created with the name `encoder`.")
448
+ if decoder.name != "decoder":
449
+ raise ValueError("decoder model must be created with the name `decoder`.")
450
+
451
+ # instantiate config with corresponding kwargs
452
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
453
+ return cls(encoder=encoder, decoder=decoder, config=config)
454
+
455
+ @unpack_inputs
456
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
457
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
458
+ def call(
459
+ self,
460
+ input_ids: TFModelInputType | None = None,
461
+ attention_mask: np.ndarray | tf.Tensor | None = None,
462
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
463
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
464
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
465
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
466
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
467
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
468
+ labels: np.ndarray | tf.Tensor | None = None,
469
+ use_cache: Optional[bool] = None,
470
+ output_attentions: Optional[bool] = None,
471
+ output_hidden_states: Optional[bool] = None,
472
+ return_dict: Optional[bool] = None,
473
+ training: bool = False,
474
+ **kwargs,
475
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
476
+ r"""
477
+ Returns:
478
+
479
+ Examples:
480
+
481
+ ```python
482
+ >>> from transformers import TFEncoderDecoderModel, BertTokenizer
483
+
484
+ >>> # initialize a bert2gpt2 from a pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
485
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
486
+
487
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
488
+
489
+ >>> # forward
490
+ >>> input_ids = tokenizer.encode(
491
+ ... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
492
+ ... ) # Batch size 1
493
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
494
+
495
+ >>> # training
496
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
497
+ >>> loss, logits = outputs.loss, outputs.logits
498
+
499
+ >>> # save and load from pretrained
500
+ >>> model.save_pretrained("bert2gpt2")
501
+ >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
502
+
503
+ >>> # generation
504
+ >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
505
+ ```"""
506
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
507
+
508
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
509
+
510
+ kwargs_decoder = {
511
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
512
+ }
513
+
514
+ # Let the user be responsible for the expected format.
515
+ if encoder_outputs is not None:
516
+ if return_dict and not isinstance(encoder_outputs, ModelOutput):
517
+ raise ValueError(
518
+ "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
519
+ f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
520
+ )
521
+
522
+ if encoder_outputs is None:
523
+ encoder_inputs = {
524
+ "input_ids": input_ids,
525
+ "attention_mask": attention_mask,
526
+ "inputs_embeds": inputs_embeds,
527
+ "output_attentions": output_attentions,
528
+ "output_hidden_states": output_hidden_states,
529
+ "return_dict": return_dict,
530
+ "training": training,
531
+ }
532
+
533
+ # Add arguments to encoder from `kwargs_encoder`
534
+ encoder_inputs.update(kwargs_encoder)
535
+
536
+ # Handle the case where the inputs are passed as a single dict which contains `labels`.
537
+ # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
538
+ # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
539
+ if "labels" in encoder_inputs:
540
+ labels = encoder_inputs.pop("labels")
541
+
542
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
543
+ if "decoder_input_ids" in encoder_inputs:
544
+ decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
545
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
546
+ if "decoder_attention_mask" in encoder_inputs:
547
+ decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
548
+
549
+ encoder_outputs = self.encoder(**encoder_inputs)
550
+
551
+ encoder_hidden_states = encoder_outputs[0]
552
+
553
+ # optionally project encoder_hidden_states
554
+ if (
555
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
556
+ and self.decoder.config.cross_attention_hidden_size is None
557
+ ):
558
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
559
+
560
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
561
+ decoder_input_ids = shift_tokens_right(
562
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
563
+ )
564
+
565
+ decoder_inputs = {
566
+ "input_ids": decoder_input_ids,
567
+ "attention_mask": decoder_attention_mask,
568
+ "encoder_hidden_states": encoder_hidden_states,
569
+ "encoder_attention_mask": attention_mask,
570
+ "inputs_embeds": decoder_inputs_embeds,
571
+ "output_attentions": output_attentions,
572
+ "output_hidden_states": output_hidden_states,
573
+ "use_cache": use_cache,
574
+ "past_key_values": past_key_values,
575
+ "return_dict": return_dict,
576
+ "training": training,
577
+ }
578
+
579
+ # Add arguments to decoder from `kwargs_decoder`
580
+ decoder_inputs.update(kwargs_decoder)
581
+
582
+ decoder_outputs = self.decoder(**decoder_inputs)
583
+
584
+ logits = decoder_outputs[0]
585
+
586
+ # Compute the loss independently of the decoder (as some decoders shift the logits internally)
587
+ loss = None
588
+ if labels is not None:
589
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
590
+ loss = self.hf_compute_loss(labels, logits)
591
+
592
+ if not return_dict:
593
+ past_key_values = None
594
+ if use_cache:
595
+ past_key_values = decoder_outputs[1]
596
+ # The starting index of the remaining elements in `decoder_outputs`
597
+ start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
598
+
599
+ if not isinstance(encoder_outputs, tuple):
600
+ encoder_outputs = encoder_outputs.to_tuple()
601
+ output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
602
+ output = tuple([x for x in output if x is not None])
603
+ return output
604
+
605
+ return TFSeq2SeqLMOutput(
606
+ loss=loss,
607
+ logits=decoder_outputs.logits,
608
+ past_key_values=decoder_outputs.past_key_values,
609
+ decoder_hidden_states=decoder_outputs.hidden_states,
610
+ decoder_attentions=decoder_outputs.attentions,
611
+ cross_attentions=decoder_outputs.cross_attentions,
612
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
613
+ encoder_hidden_states=encoder_outputs.hidden_states,
614
+ encoder_attentions=encoder_outputs.attentions,
615
+ )
616
+
617
+ def prepare_inputs_for_generation(
618
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
619
+ ):
620
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
621
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
622
+ past_key_values = decoder_inputs.get("past_key_values")
623
+ if past_key_values is None:
624
+ past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2
625
+ input_dict = {
626
+ "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy
627
+ "attention_mask": attention_mask,
628
+ "decoder_attention_mask": decoder_attention_mask,
629
+ "decoder_input_ids": decoder_inputs["input_ids"],
630
+ # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
631
+ "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
632
+ "past_key_values": past_key_values,
633
+ "use_cache": use_cache,
634
+ }
635
+ return input_dict
636
+
637
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
638
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
639
+
640
+ def resize_token_embeddings(self, *args, **kwargs):
641
+ raise NotImplementedError(
642
+ "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported. Please use the"
643
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
644
+ " model.decoder.resize_token_embeddings(...))"
645
+ )
646
+
647
+ def _reorder_cache(self, past, beam_idx):
648
+ # apply decoder cache reordering here
649
+ return self.decoder._reorder_cache(past, beam_idx)
650
+
651
+ def build(self, input_shape=None):
652
+ if self.built:
653
+ return
654
+ self.built = True
655
+ if getattr(self, "enc_to_dec_proj", None) is not None:
656
+ with tf.name_scope(self.enc_to_dec_proj.name):
657
+ self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size])
658
+ if getattr(self, "encoder", None) is not None:
659
+ with tf.name_scope(self.encoder.name):
660
+ self.encoder.build(None)
661
+ if getattr(self, "decoder", None) is not None:
662
+ with tf.name_scope(self.decoder.name):
663
+ self.decoder.build(None)
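The `from_encoder_decoder_pretrained` and `call` methods above both route keyword arguments by the `encoder_`/`decoder_` prefixes described in the docstring. Below is a minimal, standalone sketch of that routing; `split_prefixed_kwargs` is a hypothetical helper used only for illustration and is not part of the transformers API.

```python
# Hypothetical helper illustrating the prefix routing used by
# `from_encoder_decoder_pretrained` and `call` above (not part of transformers).
def split_prefixed_kwargs(**kwargs):
    # Arguments prefixed with `encoder_` go to the encoder, `decoder_` to the decoder,
    # and everything else is left for the joint EncoderDecoderConfig.
    kwargs_encoder = {
        key[len("encoder_") :]: value for key, value in kwargs.items() if key.startswith("encoder_")
    }
    kwargs_decoder = {
        key[len("decoder_") :]: value for key, value in kwargs.items() if key.startswith("decoder_")
    }
    kwargs_shared = {
        key: value for key, value in kwargs.items() if not key.startswith(("encoder_", "decoder_"))
    }
    return kwargs_encoder, kwargs_decoder, kwargs_shared


encoder_kwargs, decoder_kwargs, shared_kwargs = split_prefixed_kwargs(
    encoder_output_attentions=True, decoder_use_cache=False, tie_encoder_decoder=False
)
print(encoder_kwargs)  # {'output_attentions': True}
print(decoder_kwargs)  # {'use_cache': False}
print(shared_kwargs)   # {'tie_encoder_decoder': False}
```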
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/__init__.py ADDED
@@ -0,0 +1,81 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_grounding_dino": [
22
+ "GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "GroundingDinoConfig",
24
+ ],
25
+ "processing_grounding_dino": ["GroundingDinoProcessor"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_grounding_dino"] = [
35
+ "GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "GroundingDinoForObjectDetection",
37
+ "GroundingDinoModel",
38
+ "GroundingDinoPreTrainedModel",
39
+ ]
40
+
41
+ try:
42
+ if not is_vision_available():
43
+ raise OptionalDependencyNotAvailable()
44
+ except OptionalDependencyNotAvailable:
45
+ pass
46
+ else:
47
+ _import_structure["image_processing_grounding_dino"] = ["GroundingDinoImageProcessor"]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_grounding_dino import (
52
+ GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP,
53
+ GroundingDinoConfig,
54
+ )
55
+ from .processing_grounding_dino import GroundingDinoProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_grounding_dino import (
64
+ GROUNDING_DINO_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ GroundingDinoForObjectDetection,
66
+ GroundingDinoModel,
67
+ GroundingDinoPreTrainedModel,
68
+ )
69
+
70
+ try:
71
+ if not is_vision_available():
72
+ raise OptionalDependencyNotAvailable()
73
+ except OptionalDependencyNotAvailable:
74
+ pass
75
+ else:
76
+ from .image_processing_grounding_dino import GroundingDinoImageProcessor
77
+
78
+ else:
79
+ import sys
80
+
81
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
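The `__init__.py` above registers the package with `_LazyModule` so that heavy optional dependencies (torch, vision) are imported only when a symbol is actually accessed. The snippet below is a simplified, self-contained analogue of that pattern using a PEP 562 module-level `__getattr__`; it is a sketch of the idea, not the actual `_LazyModule` implementation.

```python
# Simplified sketch of a lazy package __init__.py (assumed to live inside a package);
# not the real `_LazyModule`, just the same idea: defer submodule imports until first use.
import importlib

_IMPORT_STRUCTURE = {
    "configuration_grounding_dino": ["GroundingDinoConfig"],
    "processing_grounding_dino": ["GroundingDinoProcessor"],
}
# Map each public name to the submodule that defines it.
_NAME_TO_MODULE = {name: module for module, names in _IMPORT_STRUCTURE.items() for name in names}


def __getattr__(name):
    # Called only when `name` is not found in the module namespace (PEP 562).
    if name in _NAME_TO_MODULE:
        submodule = importlib.import_module(f".{_NAME_TO_MODULE[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    return sorted(list(globals()) + list(_NAME_TO_MODULE))
```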
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/configuration_grounding_dino.py ADDED
@@ -0,0 +1,301 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Grounding DINO model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+ from ..auto import CONFIG_MAPPING
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ GROUNDING_DINO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
25
+ "IDEA-Research/grounding-dino-tiny": "https://huggingface.co/IDEA-Research/grounding-dino-tiny/resolve/main/config.json",
26
+ }
27
+
28
+
29
+ class GroundingDinoConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`GroundingDinoModel`]. It is used to instantiate a
32
+ Grounding DINO model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the Grounding DINO
34
+ [IDEA-Research/grounding-dino-tiny](https://huggingface.co/IDEA-Research/grounding-dino-tiny) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
41
+ The configuration of the backbone model.
42
+ backbone (`str`, *optional*):
43
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
44
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
45
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
46
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
47
+ Whether to use pretrained weights for the backbone.
48
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
49
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
50
+ library.
51
+ backbone_kwargs (`dict`, *optional*):
52
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
53
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
54
+ text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`):
55
+ The config object or dictionary of the text backbone.
56
+ num_queries (`int`, *optional*, defaults to 900):
57
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
58
+ [`GroundingDinoModel`] can detect in a single image.
59
+ encoder_layers (`int`, *optional*, defaults to 6):
60
+ Number of encoder layers.
61
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
62
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
63
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
64
+ Number of attention heads for each attention layer in the Transformer encoder.
65
+ decoder_layers (`int`, *optional*, defaults to 6):
66
+ Number of decoder layers.
67
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
68
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
69
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
70
+ Number of attention heads for each attention layer in the Transformer decoder.
71
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
72
+ Whether the model is used as an encoder/decoder or not.
73
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
74
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
75
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
76
+ d_model (`int`, *optional*, defaults to 256):
77
+ Dimension of the layers.
78
+ dropout (`float`, *optional*, defaults to 0.1):
79
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
80
+ attention_dropout (`float`, *optional*, defaults to 0.0):
81
+ The dropout ratio for the attention probabilities.
82
+ activation_dropout (`float`, *optional*, defaults to 0.0):
83
+ The dropout ratio for activations inside the fully connected layer.
84
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
85
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
86
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
87
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
88
+ num_feature_levels (`int`, *optional*, defaults to 4):
89
+ The number of input feature levels.
90
+ encoder_n_points (`int`, *optional*, defaults to 4):
91
+ The number of sampled keys in each feature level for each attention head in the encoder.
92
+ decoder_n_points (`int`, *optional*, defaults to 4):
93
+ The number of sampled keys in each feature level for each attention head in the decoder.
94
+ two_stage (`bool`, *optional*, defaults to `True`):
95
+ Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
96
+ Grounding DINO, and these proposals are further fed into the decoder for iterative bounding box refinement.
97
+ class_cost (`float`, *optional*, defaults to 1.0):
98
+ Relative weight of the classification error in the Hungarian matching cost.
99
+ bbox_cost (`float`, *optional*, defaults to 5.0):
100
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
101
+ giou_cost (`float`, *optional*, defaults to 2.0):
102
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
103
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5.0):
104
+ Relative weight of the L1 bounding box loss in the object detection loss.
105
+ giou_loss_coefficient (`float`, *optional*, defaults to 2.0):
106
+ Relative weight of the generalized IoU loss in the object detection loss.
107
+ focal_alpha (`float`, *optional*, defaults to 0.25):
108
+ Alpha parameter in the focal loss.
109
+ disable_custom_kernels (`bool`, *optional*, defaults to `False`):
110
+ Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
111
+ kernels are not supported by PyTorch ONNX export.
112
+ max_text_len (`int`, *optional*, defaults to 256):
113
+ The maximum length of the text input.
114
+ text_enhancer_dropout (`float`, *optional*, defaults to 0.0):
115
+ The dropout ratio for the text enhancer.
116
+ fusion_droppath (`float`, *optional*, defaults to 0.1):
117
+ The droppath ratio for the fusion module.
118
+ fusion_dropout (`float`, *optional*, defaults to 0.0):
119
+ The dropout ratio for the fusion module.
120
+ embedding_init_target (`bool`, *optional*, defaults to `True`):
121
+ Whether to initialize the target with Embedding weights.
122
+ query_dim (`int`, *optional*, defaults to 4):
123
+ The dimension of the query vector.
124
+ decoder_bbox_embed_share (`bool`, *optional*, defaults to `True`):
125
+ Whether to share the bbox regression head for all decoder layers.
126
+ two_stage_bbox_embed_share (`bool`, *optional*, defaults to `False`):
127
+ Whether to share the bbox embedding between the two-stage bbox generator and the region proposal
128
+ generation.
129
+ positional_embedding_temperature (`float`, *optional*, defaults to 20):
130
+ The temperature for the sine positional embedding that is used together with the vision backbone.
131
+ init_std (`float`, *optional*, defaults to 0.02):
132
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
133
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
134
+ The epsilon used by the layer normalization layers.
135
+
136
+ Examples:
137
+
138
+ ```python
139
+ >>> from transformers import GroundingDinoConfig, GroundingDinoModel
140
+
141
+ >>> # Initializing a Grounding DINO IDEA-Research/grounding-dino-tiny style configuration
142
+ >>> configuration = GroundingDinoConfig()
143
+
144
+ >>> # Initializing a model (with random weights) from the IDEA-Research/grounding-dino-tiny style configuration
145
+ >>> model = GroundingDinoModel(configuration)
146
+
147
+ >>> # Accessing the model configuration
148
+ >>> configuration = model.config
149
+ ```"""
150
+
151
+ model_type = "grounding-dino"
152
+ attribute_map = {
153
+ "hidden_size": "d_model",
154
+ "num_attention_heads": "encoder_attention_heads",
155
+ }
156
+
157
+ def __init__(
158
+ self,
159
+ backbone_config=None,
160
+ backbone=None,
161
+ use_pretrained_backbone=False,
162
+ use_timm_backbone=False,
163
+ backbone_kwargs=None,
164
+ text_config=None,
165
+ num_queries=900,
166
+ encoder_layers=6,
167
+ encoder_ffn_dim=2048,
168
+ encoder_attention_heads=8,
169
+ decoder_layers=6,
170
+ decoder_ffn_dim=2048,
171
+ decoder_attention_heads=8,
172
+ is_encoder_decoder=True,
173
+ activation_function="relu",
174
+ d_model=256,
175
+ dropout=0.1,
176
+ attention_dropout=0.0,
177
+ activation_dropout=0.0,
178
+ auxiliary_loss=False,
179
+ position_embedding_type="sine",
180
+ num_feature_levels=4,
181
+ encoder_n_points=4,
182
+ decoder_n_points=4,
183
+ two_stage=True,
184
+ class_cost=1.0,
185
+ bbox_cost=5.0,
186
+ giou_cost=2.0,
187
+ bbox_loss_coefficient=5.0,
188
+ giou_loss_coefficient=2.0,
189
+ focal_alpha=0.25,
190
+ disable_custom_kernels=False,
191
+ # other parameters
192
+ max_text_len=256,
193
+ text_enhancer_dropout=0.0,
194
+ fusion_droppath=0.1,
195
+ fusion_dropout=0.0,
196
+ embedding_init_target=True,
197
+ query_dim=4,
198
+ decoder_bbox_embed_share=True,
199
+ two_stage_bbox_embed_share=False,
200
+ positional_embedding_temperature=20,
201
+ init_std=0.02,
202
+ layer_norm_eps=1e-5,
203
+ **kwargs,
204
+ ):
205
+ if not use_timm_backbone and use_pretrained_backbone:
206
+ raise ValueError(
207
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
208
+ )
209
+
210
+ if backbone_config is not None and backbone is not None:
211
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
212
+
213
+ if backbone_config is None and backbone is None:
214
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
215
+ backbone_config = CONFIG_MAPPING["swin"](
216
+ window_size=7,
217
+ image_size=224,
218
+ embed_dim=96,
219
+ depths=[2, 2, 6, 2],
220
+ num_heads=[3, 6, 12, 24],
221
+ out_indices=[2, 3, 4],
222
+ )
223
+ elif isinstance(backbone_config, dict):
224
+ backbone_model_type = backbone_config.pop("model_type")
225
+ config_class = CONFIG_MAPPING[backbone_model_type]
226
+ backbone_config = config_class.from_dict(backbone_config)
227
+
228
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
229
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
230
+
231
+ if text_config is None:
232
+ text_config = {}
233
+ logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).")
234
+
235
+ self.backbone_config = backbone_config
236
+ self.backbone = backbone
237
+ self.use_pretrained_backbone = use_pretrained_backbone
238
+ self.use_timm_backbone = use_timm_backbone
239
+ self.backbone_kwargs = backbone_kwargs
240
+ self.num_queries = num_queries
241
+ self.d_model = d_model
242
+ self.encoder_ffn_dim = encoder_ffn_dim
243
+ self.encoder_layers = encoder_layers
244
+ self.encoder_attention_heads = encoder_attention_heads
245
+ self.decoder_ffn_dim = decoder_ffn_dim
246
+ self.decoder_layers = decoder_layers
247
+ self.decoder_attention_heads = decoder_attention_heads
248
+ self.dropout = dropout
249
+ self.attention_dropout = attention_dropout
250
+ self.activation_dropout = activation_dropout
251
+ self.activation_function = activation_function
252
+ self.auxiliary_loss = auxiliary_loss
253
+ self.position_embedding_type = position_embedding_type
254
+ # deformable attributes
255
+ self.num_feature_levels = num_feature_levels
256
+ self.encoder_n_points = encoder_n_points
257
+ self.decoder_n_points = decoder_n_points
258
+ self.two_stage = two_stage
259
+ # Hungarian matcher
260
+ self.class_cost = class_cost
261
+ self.bbox_cost = bbox_cost
262
+ self.giou_cost = giou_cost
263
+ # Loss coefficients
264
+ self.bbox_loss_coefficient = bbox_loss_coefficient
265
+ self.giou_loss_coefficient = giou_loss_coefficient
266
+ self.focal_alpha = focal_alpha
267
+ self.disable_custom_kernels = disable_custom_kernels
268
+ # Text backbone
269
+ if isinstance(text_config, dict):
270
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "bert"
271
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
272
+ elif text_config is None:
273
+ text_config = CONFIG_MAPPING["bert"]()
274
+
275
+ self.text_config = text_config
276
+ self.max_text_len = max_text_len
277
+
278
+ # Text Enhancer
279
+ self.text_enhancer_dropout = text_enhancer_dropout
280
+ # Fusion
281
+ self.fusion_droppath = fusion_droppath
282
+ self.fusion_dropout = fusion_dropout
283
+ # Others
284
+ self.embedding_init_target = embedding_init_target
285
+ self.query_dim = query_dim
286
+ self.decoder_bbox_embed_share = decoder_bbox_embed_share
287
+ self.two_stage_bbox_embed_share = two_stage_bbox_embed_share
288
+ if two_stage_bbox_embed_share and not decoder_bbox_embed_share:
289
+ raise ValueError("If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.")
290
+ self.positional_embedding_temperature = positional_embedding_temperature
291
+ self.init_std = init_std
292
+ self.layer_norm_eps = layer_norm_eps
293
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
294
+
295
+ @property
296
+ def num_attention_heads(self) -> int:
297
+ return self.encoder_attention_heads
298
+
299
+ @property
300
+ def hidden_size(self) -> int:
301
+ return self.d_model
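As a usage note for the configuration class above: a `backbone_config` passed as a plain dict is resolved through `CONFIG_MAPPING` inside `__init__`, and a missing `text_config` falls back to a default `BertConfig`. The following is a small, hedged sketch assuming a transformers release that already ships Grounding DINO.

```python
# Sketch only: assumes a transformers version that includes Grounding DINO.
from transformers import GroundingDinoConfig, SwinConfig

config = GroundingDinoConfig(
    backbone_config={
        "model_type": "swin",
        "embed_dim": 96,
        "depths": [2, 2, 6, 2],
        "num_heads": [3, 6, 12, 24],
        "out_indices": [2, 3, 4],
    },
    num_queries=900,
)

# The dict was converted into a concrete backbone config object ...
assert isinstance(config.backbone_config, SwinConfig)
# ... and the text backbone defaulted to BERT.
assert config.text_config.model_type == "bert"
# `hidden_size` / `num_attention_heads` are aliases for `d_model` / `encoder_attention_heads`.
print(config.hidden_size, config.num_attention_heads)  # 256 8
```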
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py ADDED
@@ -0,0 +1,491 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Grounding DINO checkpoints from the original repository.
16
+
17
+ URL: https://github.com/IDEA-Research/GroundingDINO"""
18
+
19
+ import argparse
20
+
21
+ import requests
22
+ import torch
23
+ from PIL import Image
24
+ from torchvision import transforms as T
25
+
26
+ from transformers import (
27
+ AutoTokenizer,
28
+ GroundingDinoConfig,
29
+ GroundingDinoForObjectDetection,
30
+ GroundingDinoImageProcessor,
31
+ GroundingDinoProcessor,
32
+ SwinConfig,
33
+ )
34
+
35
+
36
+ IMAGENET_MEAN = [0.485, 0.456, 0.406]
37
+ IMAGENET_STD = [0.229, 0.224, 0.225]
38
+
39
+
40
+ def get_grounding_dino_config(model_name):
41
+ if "tiny" in model_name:
42
+ window_size = 7
43
+ embed_dim = 96
44
+ depths = (2, 2, 6, 2)
45
+ num_heads = (3, 6, 12, 24)
46
+ image_size = 224
47
+ elif "base" in model_name:
48
+ window_size = 12
49
+ embed_dim = 128
50
+ depths = (2, 2, 18, 2)
51
+ num_heads = (4, 8, 16, 32)
52
+ image_size = 384
53
+ else:
54
+ raise ValueError("Model not supported, only supports tiny and base variants")
55
+
56
+ backbone_config = SwinConfig(
57
+ window_size=window_size,
58
+ image_size=image_size,
59
+ embed_dim=embed_dim,
60
+ depths=depths,
61
+ num_heads=num_heads,
62
+ out_indices=[2, 3, 4],
63
+ )
64
+
65
+ config = GroundingDinoConfig(backbone_config=backbone_config)
66
+
67
+ return config
68
+
69
+
70
+ def create_rename_keys(state_dict, config):
71
+ rename_keys = []
72
+ # fmt: off
73
+ ########################################## VISION BACKBONE - START
74
+ # patch embedding layer
75
+ rename_keys.append(("backbone.0.patch_embed.proj.weight",
76
+ "model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.weight"))
77
+ rename_keys.append(("backbone.0.patch_embed.proj.bias",
78
+ "model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.bias"))
79
+ rename_keys.append(("backbone.0.patch_embed.norm.weight",
80
+ "model.backbone.conv_encoder.model.embeddings.norm.weight"))
81
+ rename_keys.append(("backbone.0.patch_embed.norm.bias",
82
+ "model.backbone.conv_encoder.model.embeddings.norm.bias"))
83
+
84
+ for layer, depth in enumerate(config.backbone_config.depths):
85
+ for block in range(depth):
86
+ # layernorms
87
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm1.weight",
88
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.weight"))
89
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm1.bias",
90
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.bias"))
91
+
92
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm2.weight",
93
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.weight"))
94
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.norm2.bias",
95
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.bias"))
96
+ # attention
97
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.relative_position_bias_table",
98
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.relative_position_bias_table"))
99
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.proj.weight",
100
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.weight"))
101
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.attn.proj.bias",
102
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.bias"))
103
+ # intermediate
104
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.weight",
105
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.weight"))
106
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.bias",
107
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.bias"))
108
+
109
+ # output
110
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.weight",
111
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.weight"))
112
+ rename_keys.append((f"backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.bias",
113
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.bias"))
114
+
115
+ # downsample
116
+ if layer!=len(config.backbone_config.depths)-1:
117
+ rename_keys.append((f"backbone.0.layers.{layer}.downsample.reduction.weight",
118
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.reduction.weight"))
119
+ rename_keys.append((f"backbone.0.layers.{layer}.downsample.norm.weight",
120
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.weight"))
121
+ rename_keys.append((f"backbone.0.layers.{layer}.downsample.norm.bias",
122
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.bias"))
123
+
124
+ for out_indice in config.backbone_config.out_indices:
125
+ # Grounding DINO implementation of out_indices isn't aligned with transformers
126
+ rename_keys.append((f"backbone.0.norm{out_indice-1}.weight",
127
+ f"model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.weight"))
128
+ rename_keys.append((f"backbone.0.norm{out_indice-1}.bias",
129
+ f"model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.bias"))
130
+
131
+ ########################################## VISION BACKBONE - END
132
+
133
+ ########################################## ENCODER - START
134
+ deformable_key_mappings = {
135
+ 'self_attn.sampling_offsets.weight': 'deformable_layer.self_attn.sampling_offsets.weight',
136
+ 'self_attn.sampling_offsets.bias': 'deformable_layer.self_attn.sampling_offsets.bias',
137
+ 'self_attn.attention_weights.weight': 'deformable_layer.self_attn.attention_weights.weight',
138
+ 'self_attn.attention_weights.bias': 'deformable_layer.self_attn.attention_weights.bias',
139
+ 'self_attn.value_proj.weight': 'deformable_layer.self_attn.value_proj.weight',
140
+ 'self_attn.value_proj.bias': 'deformable_layer.self_attn.value_proj.bias',
141
+ 'self_attn.output_proj.weight': 'deformable_layer.self_attn.output_proj.weight',
142
+ 'self_attn.output_proj.bias': 'deformable_layer.self_attn.output_proj.bias',
143
+ 'norm1.weight': 'deformable_layer.self_attn_layer_norm.weight',
144
+ 'norm1.bias': 'deformable_layer.self_attn_layer_norm.bias',
145
+ 'linear1.weight': 'deformable_layer.fc1.weight',
146
+ 'linear1.bias': 'deformable_layer.fc1.bias',
147
+ 'linear2.weight': 'deformable_layer.fc2.weight',
148
+ 'linear2.bias': 'deformable_layer.fc2.bias',
149
+ 'norm2.weight': 'deformable_layer.final_layer_norm.weight',
150
+ 'norm2.bias': 'deformable_layer.final_layer_norm.bias',
151
+ }
152
+ text_enhancer_key_mappings = {
153
+ 'self_attn.in_proj_weight': 'text_enhancer_layer.self_attn.in_proj_weight',
154
+ 'self_attn.in_proj_bias': 'text_enhancer_layer.self_attn.in_proj_bias',
155
+ 'self_attn.out_proj.weight': 'text_enhancer_layer.self_attn.out_proj.weight',
156
+ 'self_attn.out_proj.bias': 'text_enhancer_layer.self_attn.out_proj.bias',
157
+ 'linear1.weight': 'text_enhancer_layer.fc1.weight',
158
+ 'linear1.bias': 'text_enhancer_layer.fc1.bias',
159
+ 'linear2.weight': 'text_enhancer_layer.fc2.weight',
160
+ 'linear2.bias': 'text_enhancer_layer.fc2.bias',
161
+ 'norm1.weight': 'text_enhancer_layer.layer_norm_before.weight',
162
+ 'norm1.bias': 'text_enhancer_layer.layer_norm_before.bias',
163
+ 'norm2.weight': 'text_enhancer_layer.layer_norm_after.weight',
164
+ 'norm2.bias': 'text_enhancer_layer.layer_norm_after.bias',
165
+ }
166
+ fusion_key_mappings = {
167
+ 'gamma_v': 'fusion_layer.vision_param',
168
+ 'gamma_l': 'fusion_layer.text_param',
169
+ 'layer_norm_v.weight': 'fusion_layer.layer_norm_vision.weight',
170
+ 'layer_norm_v.bias': 'fusion_layer.layer_norm_vision.bias',
171
+ 'layer_norm_l.weight': 'fusion_layer.layer_norm_text.weight',
172
+ 'layer_norm_l.bias': 'fusion_layer.layer_norm_text.bias',
173
+ 'attn.v_proj.weight': 'fusion_layer.attn.vision_proj.weight',
174
+ 'attn.v_proj.bias': 'fusion_layer.attn.vision_proj.bias',
175
+ 'attn.l_proj.weight': 'fusion_layer.attn.text_proj.weight',
176
+ 'attn.l_proj.bias': 'fusion_layer.attn.text_proj.bias',
177
+ 'attn.values_v_proj.weight': 'fusion_layer.attn.values_vision_proj.weight',
178
+ 'attn.values_v_proj.bias': 'fusion_layer.attn.values_vision_proj.bias',
179
+ 'attn.values_l_proj.weight': 'fusion_layer.attn.values_text_proj.weight',
180
+ 'attn.values_l_proj.bias': 'fusion_layer.attn.values_text_proj.bias',
181
+ 'attn.out_v_proj.weight': 'fusion_layer.attn.out_vision_proj.weight',
182
+ 'attn.out_v_proj.bias': 'fusion_layer.attn.out_vision_proj.bias',
183
+ 'attn.out_l_proj.weight': 'fusion_layer.attn.out_text_proj.weight',
184
+ 'attn.out_l_proj.bias': 'fusion_layer.attn.out_text_proj.bias',
185
+ }
186
+ for layer in range(config.encoder_layers):
187
+ # deformable
188
+ for src, dest in deformable_key_mappings.items():
189
+ rename_keys.append((f"transformer.encoder.layers.{layer}.{src}",
190
+ f"model.encoder.layers.{layer}.{dest}"))
191
+ # text enhance
192
+ for src, dest in text_enhancer_key_mappings.items():
193
+ rename_keys.append((f"transformer.encoder.text_layers.{layer}.{src}",
194
+ f"model.encoder.layers.{layer}.{dest}"))
195
+ # fusion layers
196
+ for src, dest in fusion_key_mappings.items():
197
+ rename_keys.append((f"transformer.encoder.fusion_layers.{layer}.{src}",
198
+ f"model.encoder.layers.{layer}.{dest}"))
199
+ ########################################## ENCODER - END
200
+
201
+ ########################################## DECODER - START
202
+ key_mappings_decoder = {
203
+ 'cross_attn.sampling_offsets.weight': 'encoder_attn.sampling_offsets.weight',
204
+ 'cross_attn.sampling_offsets.bias': 'encoder_attn.sampling_offsets.bias',
205
+ 'cross_attn.attention_weights.weight': 'encoder_attn.attention_weights.weight',
206
+ 'cross_attn.attention_weights.bias': 'encoder_attn.attention_weights.bias',
207
+ 'cross_attn.value_proj.weight': 'encoder_attn.value_proj.weight',
208
+ 'cross_attn.value_proj.bias': 'encoder_attn.value_proj.bias',
209
+ 'cross_attn.output_proj.weight': 'encoder_attn.output_proj.weight',
210
+ 'cross_attn.output_proj.bias': 'encoder_attn.output_proj.bias',
211
+ 'norm1.weight': 'encoder_attn_layer_norm.weight',
212
+ 'norm1.bias': 'encoder_attn_layer_norm.bias',
213
+ 'ca_text.in_proj_weight': 'encoder_attn_text.in_proj_weight',
214
+ 'ca_text.in_proj_bias': 'encoder_attn_text.in_proj_bias',
215
+ 'ca_text.out_proj.weight': 'encoder_attn_text.out_proj.weight',
216
+ 'ca_text.out_proj.bias': 'encoder_attn_text.out_proj.bias',
217
+ 'catext_norm.weight': 'encoder_attn_text_layer_norm.weight',
218
+ 'catext_norm.bias': 'encoder_attn_text_layer_norm.bias',
219
+ 'self_attn.in_proj_weight': 'self_attn.in_proj_weight',
220
+ 'self_attn.in_proj_bias': 'self_attn.in_proj_bias',
221
+ 'self_attn.out_proj.weight': 'self_attn.out_proj.weight',
222
+ 'self_attn.out_proj.bias': 'self_attn.out_proj.bias',
223
+ 'norm2.weight': 'self_attn_layer_norm.weight',
224
+ 'norm2.bias': 'self_attn_layer_norm.bias',
225
+ 'linear1.weight': 'fc1.weight',
226
+ 'linear1.bias': 'fc1.bias',
227
+ 'linear2.weight': 'fc2.weight',
228
+ 'linear2.bias': 'fc2.bias',
229
+ 'norm3.weight': 'final_layer_norm.weight',
230
+ 'norm3.bias': 'final_layer_norm.bias',
231
+ }
232
+ for layer_num in range(config.decoder_layers):
233
+ source_prefix_decoder = f'transformer.decoder.layers.{layer_num}.'
234
+ target_prefix_decoder = f'model.decoder.layers.{layer_num}.'
235
+
236
+ for source_name, target_name in key_mappings_decoder.items():
237
+ rename_keys.append((source_prefix_decoder + source_name,
238
+ target_prefix_decoder + target_name))
239
+ ########################################## DECODER - END
240
+
241
+ ########################################## Additional - START
242
+ for layer_name, params in state_dict.items():
243
+ #### TEXT BACKBONE
244
+ if "bert" in layer_name:
245
+ rename_keys.append((layer_name, layer_name.replace("bert", "model.text_backbone")))
246
+ #### INPUT PROJ - PROJECT OUTPUT FEATURES FROM VISION BACKBONE
247
+ if "input_proj" in layer_name:
248
+ rename_keys.append((layer_name, layer_name.replace("input_proj", "model.input_proj_vision")))
249
+ #### INPUT PROJ - PROJECT OUTPUT FEATURES FROM TEXT BACKBONE
250
+ if "feat_map" in layer_name:
251
+ rename_keys.append((layer_name, layer_name.replace("feat_map", "model.text_projection")))
252
+ #### DECODER REFERENCE POINT HEAD
253
+ if "transformer.decoder.ref_point_head" in layer_name:
254
+ rename_keys.append((layer_name, layer_name.replace("transformer.decoder.ref_point_head",
255
+ "model.decoder.reference_points_head")))
256
+ #### DECODER BBOX EMBED
257
+ if "transformer.decoder.bbox_embed" in layer_name:
258
+ rename_keys.append((layer_name, layer_name.replace("transformer.decoder.bbox_embed",
259
+ "model.decoder.bbox_embed")))
260
+ if "transformer.enc_output" in layer_name:
261
+ rename_keys.append((layer_name, layer_name.replace("transformer", "model")))
262
+
263
+ if "transformer.enc_out_bbox_embed" in layer_name:
264
+ rename_keys.append((layer_name, layer_name.replace("transformer.enc_out_bbox_embed",
265
+ "model.encoder_output_bbox_embed")))
266
+
267
+ rename_keys.append(("transformer.level_embed", "model.level_embed"))
268
+ rename_keys.append(("transformer.decoder.norm.weight", "model.decoder.layer_norm.weight"))
269
+ rename_keys.append(("transformer.decoder.norm.bias", "model.decoder.layer_norm.bias"))
270
+ rename_keys.append(("transformer.tgt_embed.weight", "model.query_position_embeddings.weight"))
271
+ ########################################## Additional - END
272
+
273
+ # fmt: on
274
+ return rename_keys
275
+
276
+
277
+ def rename_key(dct, old, new):
278
+ val = dct.pop(old)
279
+ dct[new] = val
280
+
281
+
282
+ # we split up the matrix of each encoder layer into queries, keys and values
283
+ def read_in_q_k_v_encoder(state_dict, config):
284
+ ########################################## VISION BACKBONE - START
285
+ embed_dim = config.backbone_config.embed_dim
286
+ for layer, depth in enumerate(config.backbone_config.depths):
287
+ hidden_size = embed_dim * 2**layer
288
+ for block in range(depth):
289
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
290
+ in_proj_weight = state_dict.pop(f"backbone.0.layers.{layer}.blocks.{block}.attn.qkv.weight")
291
+ in_proj_bias = state_dict.pop(f"backbone.0.layers.{layer}.blocks.{block}.attn.qkv.bias")
292
+ # next, add query, keys and values (in that order) to the state dict
293
+ state_dict[
294
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.weight"
295
+ ] = in_proj_weight[:hidden_size, :]
296
+ state_dict[
297
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.bias"
298
+ ] = in_proj_bias[:hidden_size]
299
+
300
+ state_dict[
301
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.weight"
302
+ ] = in_proj_weight[hidden_size : hidden_size * 2, :]
303
+ state_dict[
304
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.bias"
305
+ ] = in_proj_bias[hidden_size : hidden_size * 2]
306
+
307
+ state_dict[
308
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.weight"
309
+ ] = in_proj_weight[-hidden_size:, :]
310
+ state_dict[
311
+ f"model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.bias"
312
+ ] = in_proj_bias[-hidden_size:]
313
+ ########################################## VISION BACKBONE - END
314
+
315
+
316
+ def read_in_q_k_v_text_enhancer(state_dict, config):
317
+ hidden_size = config.hidden_size
318
+ for idx in range(config.encoder_layers):
319
+ # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
320
+ in_proj_weight = state_dict.pop(f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_weight")
321
+ in_proj_bias = state_dict.pop(f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_bias")
322
+ # next, add query, keys and values (in that order) to the state dict
323
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.weight"] = in_proj_weight[
324
+ :hidden_size, :
325
+ ]
326
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.bias"] = in_proj_bias[:hidden_size]
327
+
328
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.weight"] = in_proj_weight[
329
+ hidden_size : hidden_size * 2, :
330
+ ]
331
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.bias"] = in_proj_bias[
332
+ hidden_size : hidden_size * 2
333
+ ]
334
+
335
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.weight"] = in_proj_weight[
336
+ -hidden_size:, :
337
+ ]
338
+ state_dict[f"model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.bias"] = in_proj_bias[
339
+ -hidden_size:
340
+ ]
341
+
342
+
343
+ def read_in_q_k_v_decoder(state_dict, config):
344
+ hidden_size = config.hidden_size
345
+ for idx in range(config.decoder_layers):
346
+ # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
347
+ in_proj_weight = state_dict.pop(f"model.decoder.layers.{idx}.self_attn.in_proj_weight")
348
+ in_proj_bias = state_dict.pop(f"model.decoder.layers.{idx}.self_attn.in_proj_bias")
349
+ # next, add query, keys and values (in that order) to the state dict
350
+ state_dict[f"model.decoder.layers.{idx}.self_attn.query.weight"] = in_proj_weight[:hidden_size, :]
351
+ state_dict[f"model.decoder.layers.{idx}.self_attn.query.bias"] = in_proj_bias[:hidden_size]
352
+
353
+ state_dict[f"model.decoder.layers.{idx}.self_attn.key.weight"] = in_proj_weight[
354
+ hidden_size : hidden_size * 2, :
355
+ ]
356
+ state_dict[f"model.decoder.layers.{idx}.self_attn.key.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
357
+
358
+ state_dict[f"model.decoder.layers.{idx}.self_attn.value.weight"] = in_proj_weight[-hidden_size:, :]
359
+ state_dict[f"model.decoder.layers.{idx}.self_attn.value.bias"] = in_proj_bias[-hidden_size:]
360
+
361
+ # read in weights + bias of cross-attention
362
+ in_proj_weight = state_dict.pop(f"model.decoder.layers.{idx}.encoder_attn_text.in_proj_weight")
363
+ in_proj_bias = state_dict.pop(f"model.decoder.layers.{idx}.encoder_attn_text.in_proj_bias")
364
+
365
+ # next, add query, keys and values (in that order) to the state dict
366
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.query.weight"] = in_proj_weight[:hidden_size, :]
367
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.query.bias"] = in_proj_bias[:hidden_size]
368
+
369
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.key.weight"] = in_proj_weight[
370
+ hidden_size : hidden_size * 2, :
371
+ ]
372
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.key.bias"] = in_proj_bias[
373
+ hidden_size : hidden_size * 2
374
+ ]
375
+
376
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.value.weight"] = in_proj_weight[-hidden_size:, :]
377
+ state_dict[f"model.decoder.layers.{idx}.encoder_attn_text.value.bias"] = in_proj_bias[-hidden_size:]
378
+
379
+
380
+ # We will verify our results on an image of cute cats
381
+ def prepare_img():
382
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
383
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
384
+ return image
385
+
386
+
387
+ def preprocess_caption(caption: str) -> str:
388
+ result = caption.lower().strip()
389
+ if result.endswith("."):
390
+ return result
391
+ return result + "."
392
+
393
+
394
+ @torch.no_grad()
395
+ def convert_grounding_dino_checkpoint(args):
396
+ model_name = args.model_name
397
+ pytorch_dump_folder_path = args.pytorch_dump_folder_path
398
+ push_to_hub = args.push_to_hub
399
+ verify_logits = args.verify_logits
400
+
401
+ checkpoint_mapping = {
402
+ "grounding-dino-tiny": "https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swint_ogc.pth",
403
+ "grounding-dino-base": "https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swinb_cogcoor.pth",
404
+ }
405
+ # Define default GroundingDino configuration
406
+ config = get_grounding_dino_config(model_name)
407
+
408
+ # Load original checkpoint
409
+ checkpoint_url = checkpoint_mapping[model_name]
410
+ original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
411
+ original_state_dict = {k.replace("module.", ""): v for k, v in original_state_dict.items()}
412
+
413
+ for name, param in original_state_dict.items():
414
+ print(name, param.shape)
415
+
416
+ # Rename keys
417
+ new_state_dict = original_state_dict.copy()
418
+ rename_keys = create_rename_keys(original_state_dict, config)
419
+
420
+ for src, dest in rename_keys:
421
+ rename_key(new_state_dict, src, dest)
422
+ read_in_q_k_v_encoder(new_state_dict, config)
423
+ read_in_q_k_v_text_enhancer(new_state_dict, config)
424
+ read_in_q_k_v_decoder(new_state_dict, config)
425
+
426
+ # Load HF model
427
+ model = GroundingDinoForObjectDetection(config)
428
+ model.eval()
429
+ missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
430
+ print("Missing keys:", missing_keys)
431
+ print("Unexpected keys:", unexpected_keys)
432
+
433
+ # Load and process test image
434
+ image = prepare_img()
435
+ transforms = T.Compose([T.Resize(size=800, max_size=1333), T.ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
436
+ original_pixel_values = transforms(image).unsqueeze(0)
437
+
438
+ image_processor = GroundingDinoImageProcessor()
439
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
440
+ processor = GroundingDinoProcessor(image_processor=image_processor, tokenizer=tokenizer)
441
+
442
+ text = "a cat"
443
+ inputs = processor(images=image, text=preprocess_caption(text), return_tensors="pt")
444
+
445
+ assert torch.allclose(original_pixel_values, inputs.pixel_values, atol=1e-4)
446
+
447
+ if verify_logits:
448
+ # Running forward
449
+ with torch.no_grad():
450
+ outputs = model(**inputs)
451
+
452
+ print(outputs.logits[0, :3, :3])
453
+
454
+ expected_slice = torch.tensor(
455
+ [[-4.8913, -0.1900, -0.2161], [-4.9653, -0.3719, -0.3950], [-5.9599, -3.3765, -3.3104]]
456
+ )
457
+
458
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
459
+ print("Looks ok!")
460
+
461
+ if pytorch_dump_folder_path is not None:
462
+ model.save_pretrained(pytorch_dump_folder_path)
463
+ processor.save_pretrained(pytorch_dump_folder_path)
464
+
465
+ if push_to_hub:
466
+ model.push_to_hub(f"EduardoPacheco/{model_name}")
467
+ processor.push_to_hub(f"EduardoPacheco/{model_name}")
468
+
469
+
470
+ if __name__ == "__main__":
471
+ parser = argparse.ArgumentParser()
472
+ # Required parameters
473
+ parser.add_argument(
474
+ "--model_name",
475
+ default="grounding-dino-tiny",
476
+ type=str,
477
+ choices=["grounding-dino-tiny", "grounding-dino-base"],
478
+ help="Name of the GroundingDino model you'd like to convert.",
479
+ )
480
+ parser.add_argument(
481
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
482
+ )
483
+ parser.add_argument(
484
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
485
+ )
486
+ parser.add_argument(
487
+ "--verify_logits", action="store_false", help="Whether or not to verify logits after conversion."
488
+ )
489
+
490
+ args = parser.parse_args()
491
+ convert_grounding_dino_checkpoint(args)
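The `read_in_q_k_v_*` helpers above all follow the same recipe: a fused `in_proj_weight`/`in_proj_bias` in the `torch.nn.MultiheadAttention` layout (query, key, and value stacked along the first dimension) is sliced into separate query/key/value parameters. A minimal standalone sketch of that slicing, with made-up shapes, is shown below.

```python
# Standalone sketch of the fused q/k/v split performed by the read_in_q_k_v_* helpers;
# shapes are illustrative, and the layout assumption is [query; key; value] along dim 0.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]

query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : 2 * hidden_size]
value_b = in_proj_bias[-hidden_size:]

# Concatenating the three slices recovers the original fused projection exactly.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([query_b, key_b, value_b], dim=0), in_proj_bias)
```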
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/image_processing_grounding_dino.py ADDED
@@ -0,0 +1,1511 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Deformable DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
26
+ from ...image_transforms import (
27
+ PaddingMode,
28
+ center_to_corners_format,
29
+ corners_to_center_format,
30
+ id_to_rgb,
31
+ pad,
32
+ rescale,
33
+ resize,
34
+ rgb_to_id,
35
+ to_channel_dimension_format,
36
+ )
37
+ from ...image_utils import (
38
+ IMAGENET_DEFAULT_MEAN,
39
+ IMAGENET_DEFAULT_STD,
40
+ ChannelDimension,
41
+ ImageInput,
42
+ PILImageResampling,
43
+ get_image_size,
44
+ infer_channel_dimension_format,
45
+ is_scaled_image,
46
+ make_list_of_images,
47
+ to_numpy_array,
48
+ valid_images,
49
+ validate_annotations,
50
+ validate_kwargs,
51
+ validate_preprocess_arguments,
52
+ )
53
+ from ...utils import (
54
+ ExplicitEnum,
55
+ TensorType,
56
+ is_flax_available,
57
+ is_jax_tensor,
58
+ is_scipy_available,
59
+ is_tf_available,
60
+ is_tf_tensor,
61
+ is_torch_available,
62
+ is_torch_tensor,
63
+ is_vision_available,
64
+ logging,
65
+ )
66
+
67
+
68
+ if is_torch_available():
69
+ import torch
70
+ from torch import nn
71
+
72
+
73
+ if is_vision_available():
74
+ import PIL
75
+
76
+ if is_scipy_available():
77
+ import scipy.special
78
+ import scipy.stats
79
+
80
+
81
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
82
+
83
+ AnnotationType = Dict[str, Union[int, str, List[Dict]]]
84
+
85
+
86
+ class AnnotationFormat(ExplicitEnum):
87
+ COCO_DETECTION = "coco_detection"
88
+ COCO_PANOPTIC = "coco_panoptic"
89
+
90
+
91
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
92
+
93
+
94
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
95
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
96
+ """
97
+ Computes the output image size given the input image size and the desired output size.
98
+
99
+ Args:
100
+ image_size (`Tuple[int, int]`):
101
+ The input image size.
102
+ size (`int`):
103
+ The desired output size.
104
+ max_size (`int`, *optional*):
105
+ The maximum allowed output size.
106
+ """
107
+ height, width = image_size
108
+ if max_size is not None:
109
+ min_original_size = float(min((height, width)))
110
+ max_original_size = float(max((height, width)))
111
+ if max_original_size / min_original_size * size > max_size:
112
+ size = int(round(max_size * min_original_size / max_original_size))
113
+
114
+ if (height <= width and height == size) or (width <= height and width == size):
115
+ return height, width
116
+
117
+ if width < height:
118
+ ow = size
119
+ oh = int(size * height / width)
120
+ else:
121
+ oh = size
122
+ ow = int(size * width / height)
123
+ return (oh, ow)
124
+
125
+
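A quick sanity check of the aspect-ratio logic above (illustrative numbers only; not part of the file being added):

# the shortest edge is scaled to `size`, the longer edge follows the aspect ratio
get_size_with_aspect_ratio((480, 640), size=800, max_size=1333)   # -> (800, 1066)
# if the scaled longer edge would exceed `max_size`, `size` is reduced first
get_size_with_aspect_ratio((480, 1280), size=800, max_size=1333)  # -> (500, 1333)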
126
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
127
+ def get_resize_output_image_size(
128
+ input_image: np.ndarray,
129
+ size: Union[int, Tuple[int, int], List[int]],
130
+ max_size: Optional[int] = None,
131
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
132
+ ) -> Tuple[int, int]:
133
+ """
134
+ Computes the output image size given the input image size and the desired output size. If the desired output size
135
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
136
+ image size is computed by keeping the aspect ratio of the input image size.
137
+
138
+ Args:
139
+ input_image (`np.ndarray`):
140
+ The image to resize.
141
+ size (`int` or `Tuple[int, int]` or `List[int]`):
142
+ The desired output size.
143
+ max_size (`int`, *optional*):
144
+ The maximum allowed output size.
145
+ input_data_format (`ChannelDimension` or `str`, *optional*):
146
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
147
+ """
148
+ image_size = get_image_size(input_image, input_data_format)
149
+ if isinstance(size, (list, tuple)):
150
+ return size
151
+
152
+ return get_size_with_aspect_ratio(image_size, size, max_size)
153
+
154
+
155
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
156
+ def get_numpy_to_framework_fn(arr) -> Callable:
157
+ """
158
+ Returns a function that converts a numpy array to the framework of the input array.
159
+
160
+ Args:
161
+ arr (`np.ndarray`): The array to convert.
162
+ """
163
+ if isinstance(arr, np.ndarray):
164
+ return np.array
165
+ if is_tf_available() and is_tf_tensor(arr):
166
+ import tensorflow as tf
167
+
168
+ return tf.convert_to_tensor
169
+ if is_torch_available() and is_torch_tensor(arr):
170
+ import torch
171
+
172
+ return torch.tensor
173
+ if is_flax_available() and is_jax_tensor(arr):
174
+ import jax.numpy as jnp
175
+
176
+ return jnp.array
177
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
178
+
179
+
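A small usage sketch of the helper above (assuming torch is installed; not part of the file being added):

import numpy as np
import torch

to_framework = get_numpy_to_framework_fn(torch.zeros(2))  # returns torch.tensor
restored = to_framework(np.array([1.0, 2.0]))              # back to a torch.Tensor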
180
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
181
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
182
+ """
183
+ Squeezes an array, but only if the axis specified has dim 1.
184
+ """
185
+ if axis is None:
186
+ return arr.squeeze()
187
+
188
+ try:
189
+ return arr.squeeze(axis=axis)
190
+ except ValueError:
191
+ return arr
192
+
193
+
194
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
195
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
196
+ image_height, image_width = image_size
197
+ norm_annotation = {}
198
+ for key, value in annotation.items():
199
+ if key == "boxes":
200
+ boxes = value
201
+ boxes = corners_to_center_format(boxes)
202
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
203
+ norm_annotation[key] = boxes
204
+ else:
205
+ norm_annotation[key] = value
206
+ return norm_annotation
207
+
208
+
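As a worked example of the normalization above (illustrative numbers; not part of the file being added), take a corner-format box in an 800x400 (width x height) image:

import numpy as np

box = np.array([0.0, 0.0, 100.0, 200.0])  # [top_left_x, top_left_y, bottom_right_x, bottom_right_y]
center = np.array([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2, box[2] - box[0], box[3] - box[1]])
relative = center / np.array([800, 400, 800, 400])  # divide by [width, height, width, height]
# relative -> [0.0625, 0.25, 0.125, 0.5], i.e. (center_x, center_y, width, height) in [0, 1]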
209
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
210
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
211
+ """
212
+ Return the maximum value across all indices of an iterable of values.
213
+ """
214
+ return [max(values_i) for values_i in zip(*values)]
215
+
216
+
217
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
218
+ def get_max_height_width(
219
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
220
+ ) -> List[int]:
221
+ """
222
+ Get the maximum height and width across all images in a batch.
223
+ """
224
+ if input_data_format is None:
225
+ input_data_format = infer_channel_dimension_format(images[0])
226
+
227
+ if input_data_format == ChannelDimension.FIRST:
228
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
229
+ elif input_data_format == ChannelDimension.LAST:
230
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
231
+ else:
232
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
233
+ return (max_height, max_width)
234
+
235
+
236
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
237
+ def make_pixel_mask(
238
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
239
+ ) -> np.ndarray:
240
+ """
241
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
242
+
243
+ Args:
244
+ image (`np.ndarray`):
245
+ Image to make the pixel mask for.
246
+ output_size (`Tuple[int, int]`):
247
+ Output size of the mask.
248
+ """
249
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
250
+ mask = np.zeros(output_size, dtype=np.int64)
251
+ mask[:input_height, :input_width] = 1
252
+ return mask
253
+
254
+
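For illustration (not part of the file being added), padding a single channels-first image into a larger canvas produces a mask that is 1 over the original extent and 0 elsewhere:

import numpy as np

image = np.zeros((3, 480, 600))  # hypothetical channels-first image
mask = make_pixel_mask(image, output_size=(800, 1333))
# mask.shape == (800, 1333); mask[:480, :600] is all ones, the rest is zero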
255
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
256
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
257
+ """
258
+ Convert a COCO polygon annotation to a mask.
259
+
260
+ Args:
261
+ segmentations (`List[List[float]]`):
262
+ List of polygons, each polygon represented by a list of x-y coordinates.
263
+ height (`int`):
264
+ Height of the mask.
265
+ width (`int`):
266
+ Width of the mask.
267
+ """
268
+ try:
269
+ from pycocotools import mask as coco_mask
270
+ except ImportError:
271
+ raise ImportError("Pycocotools is not installed in your environment.")
272
+
273
+ masks = []
274
+ for polygons in segmentations:
275
+ rles = coco_mask.frPyObjects(polygons, height, width)
276
+ mask = coco_mask.decode(rles)
277
+ if len(mask.shape) < 3:
278
+ mask = mask[..., None]
279
+ mask = np.asarray(mask, dtype=np.uint8)
280
+ mask = np.any(mask, axis=2)
281
+ masks.append(mask)
282
+ if masks:
283
+ masks = np.stack(masks, axis=0)
284
+ else:
285
+ masks = np.zeros((0, height, width), dtype=np.uint8)
286
+
287
+ return masks
288
+
289
+
290
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->GroundingDino
291
+ def prepare_coco_detection_annotation(
292
+ image,
293
+ target,
294
+ return_segmentation_masks: bool = False,
295
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
296
+ ):
297
+ """
298
+ Convert the target in COCO format into the format expected by GroundingDino.
299
+ """
300
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
301
+
302
+ image_id = target["image_id"]
303
+ image_id = np.asarray([image_id], dtype=np.int64)
304
+
305
+ # Get all COCO annotations for the given image.
306
+ annotations = target["annotations"]
307
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
308
+
309
+ classes = [obj["category_id"] for obj in annotations]
310
+ classes = np.asarray(classes, dtype=np.int64)
311
+
312
+ # for conversion to coco api
313
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
314
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
315
+
316
+ boxes = [obj["bbox"] for obj in annotations]
317
+ # guard against no boxes via resizing
318
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
319
+ boxes[:, 2:] += boxes[:, :2]
320
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
321
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
322
+
323
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
324
+
325
+ new_target = {}
326
+ new_target["image_id"] = image_id
327
+ new_target["class_labels"] = classes[keep]
328
+ new_target["boxes"] = boxes[keep]
329
+ new_target["area"] = area[keep]
330
+ new_target["iscrowd"] = iscrowd[keep]
331
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
332
+
333
+ if annotations and "keypoints" in annotations[0]:
334
+ keypoints = [obj["keypoints"] for obj in annotations]
335
+ # Converting the filtered keypoints list to a numpy array
336
+ keypoints = np.asarray(keypoints, dtype=np.float32)
337
+ # Apply the keep mask here to filter the relevant annotations
338
+ keypoints = keypoints[keep]
339
+ num_keypoints = keypoints.shape[0]
340
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
341
+ new_target["keypoints"] = keypoints
342
+
343
+ if return_segmentation_masks:
344
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
345
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
346
+ new_target["masks"] = masks[keep]
347
+
348
+ return new_target
349
+
350
+
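A minimal COCO-detection target accepted by the function above might look like this (field names follow the COCO API; the values are illustrative, not from the source):

target = {
    "image_id": 42,
    "annotations": [
        {
            "bbox": [10.0, 20.0, 30.0, 40.0],  # [top_left_x, top_left_y, width, height]
            "category_id": 1,
            "area": 1200.0,
            "iscrowd": 0,
        }
    ],
}
# prepare_coco_detection_annotation converts the bbox to corner format ([10, 20, 40, 60]),
# clips it to the image, and returns numpy arrays under "boxes", "class_labels", "area", etc.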
351
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
352
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
353
+ """
354
+ Compute the bounding boxes around the provided panoptic segmentation masks.
355
+
356
+ Args:
357
+ masks: masks in format `[number_masks, height, width]` where N is the number of masks
358
+
359
+ Returns:
360
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
361
+ """
362
+ if masks.size == 0:
363
+ return np.zeros((0, 4))
364
+
365
+ h, w = masks.shape[-2:]
366
+ y = np.arange(0, h, dtype=np.float32)
367
+ x = np.arange(0, w, dtype=np.float32)
368
+ # see https://github.com/pytorch/pytorch/issues/50276
369
+ y, x = np.meshgrid(y, x, indexing="ij")
370
+
371
+ x_mask = masks * np.expand_dims(x, axis=0)
372
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
373
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
374
+ x_min = x.filled(fill_value=1e8)
375
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
376
+
377
+ y_mask = masks * np.expand_dims(y, axis=0)
378
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
379
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
380
+ y_min = y.filled(fill_value=1e8)
381
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
382
+
383
+ return np.stack([x_min, y_min, x_max, y_max], 1)
384
+
385
+
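A tiny check of the helper above (illustrative; not part of the file being added):

import numpy as np

mask = np.zeros((1, 5, 5), dtype=np.uint8)
mask[0, 1:4, 2:5] = 1  # one rectangular mask covering rows 1-3 and columns 2-4
masks_to_boxes(mask)   # -> array([[2., 1., 4., 3.]]) in xyxy format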
386
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->GroundingDino
387
+ def prepare_coco_panoptic_annotation(
388
+ image: np.ndarray,
389
+ target: Dict,
390
+ masks_path: Union[str, pathlib.Path],
391
+ return_masks: bool = True,
392
+ input_data_format: Union[ChannelDimension, str] = None,
393
+ ) -> Dict:
394
+ """
395
+ Prepare a coco panoptic annotation for GroundingDino.
396
+ """
397
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
398
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
399
+
400
+ new_target = {}
401
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
402
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
403
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
404
+
405
+ if "segments_info" in target:
406
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
407
+ masks = rgb_to_id(masks)
408
+
409
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
410
+ masks = masks == ids[:, None, None]
411
+ masks = masks.astype(np.uint8)
412
+ if return_masks:
413
+ new_target["masks"] = masks
414
+ new_target["boxes"] = masks_to_boxes(masks)
415
+ new_target["class_labels"] = np.array(
416
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
417
+ )
418
+ new_target["iscrowd"] = np.asarray(
419
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
420
+ )
421
+ new_target["area"] = np.asarray(
422
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
423
+ )
424
+
425
+ return new_target
426
+
427
+
428
+ # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
429
+ def get_segmentation_image(
430
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
431
+ ):
432
+ h, w = input_size
433
+ final_h, final_w = target_size
434
+
435
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
436
+
437
+ if m_id.shape[-1] == 0:
438
+ # We didn't detect any mask :(
439
+ m_id = np.zeros((h, w), dtype=np.int64)
440
+ else:
441
+ m_id = m_id.argmax(-1).reshape(h, w)
442
+
443
+ if deduplicate:
444
+ # Merge the masks corresponding to the same stuff class
445
+ for equiv in stuff_equiv_classes.values():
446
+ for eq_id in equiv:
447
+ m_id[m_id == eq_id] = equiv[0]
448
+
449
+ seg_img = id_to_rgb(m_id)
450
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
451
+ return seg_img
452
+
453
+
454
+ # Copied from transformers.models.detr.image_processing_detr.get_mask_area
455
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
456
+ final_h, final_w = target_size
457
+ np_seg_img = seg_img.astype(np.uint8)
458
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
459
+ m_id = rgb_to_id(np_seg_img)
460
+ area = [(m_id == i).sum() for i in range(n_classes)]
461
+ return area
462
+
463
+
464
+ # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
465
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
466
+ probs = scipy.special.softmax(logits, axis=-1)
467
+ labels = probs.argmax(-1, keepdims=True)
468
+ scores = np.take_along_axis(probs, labels, axis=-1)
469
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
470
+ return scores, labels
471
+
472
+
473
+ # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample
474
+ def post_process_panoptic_sample(
475
+ out_logits: np.ndarray,
476
+ masks: np.ndarray,
477
+ boxes: np.ndarray,
478
+ processed_size: Tuple[int, int],
479
+ target_size: Tuple[int, int],
480
+ is_thing_map: Dict,
481
+ threshold=0.85,
482
+ ) -> Dict:
483
+ """
484
+ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
485
+
486
+ Args:
487
+ out_logits (`torch.Tensor`):
488
+ The logits for this sample.
489
+ masks (`torch.Tensor`):
490
+ The predicted segmentation masks for this sample.
491
+ boxes (`torch.Tensor`):
492
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
493
+ width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding padding).
494
+ processed_size (`Tuple[int, int]`):
495
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
496
+ after data augmentation but before batching.
497
+ target_size (`Tuple[int, int]`):
498
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
499
+ prediction.
500
+ is_thing_map (`Dict`):
501
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
502
+ threshold (`float`, *optional*, defaults to 0.85):
503
+ The threshold used to binarize the segmentation masks.
504
+ """
505
+ # we filter empty queries and detection below threshold
506
+ scores, labels = score_labels_from_class_probabilities(out_logits)
507
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
508
+
509
+ cur_scores = scores[keep]
510
+ cur_classes = labels[keep]
511
+ cur_boxes = center_to_corners_format(boxes[keep])
512
+
513
+ if len(cur_boxes) != len(cur_classes):
514
+ raise ValueError("Not as many boxes as there are classes")
515
+
516
+ cur_masks = masks[keep]
517
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
518
+ cur_masks = safe_squeeze(cur_masks, 1)
519
+ b, h, w = cur_masks.shape
520
+
521
+ # It may be that we have several predicted masks for the same stuff class.
522
+ # In the following, we track the list of masks ids for each stuff class (they are merged later on)
523
+ cur_masks = cur_masks.reshape(b, -1)
524
+ stuff_equiv_classes = defaultdict(list)
525
+ for k, label in enumerate(cur_classes):
526
+ if not is_thing_map[label]:
527
+ stuff_equiv_classes[label].append(k)
528
+
529
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
530
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
531
+
532
+ # We filter out any mask that is too small
533
+ if cur_classes.size > 0:
534
+ # We now filter empty masks as long as we find some
535
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
536
+ while filtered_small.any():
537
+ cur_masks = cur_masks[~filtered_small]
538
+ cur_scores = cur_scores[~filtered_small]
539
+ cur_classes = cur_classes[~filtered_small]
540
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
541
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
542
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
543
+ else:
544
+ cur_classes = np.ones((1, 1), dtype=np.int64)
545
+
546
+ segments_info = [
547
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
548
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
549
+ ]
550
+ del cur_classes
551
+
552
+ with io.BytesIO() as out:
553
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
554
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
555
+
556
+ return predictions
557
+
558
+
559
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
560
+ def resize_annotation(
561
+ annotation: Dict[str, Any],
562
+ orig_size: Tuple[int, int],
563
+ target_size: Tuple[int, int],
564
+ threshold: float = 0.5,
565
+ resample: PILImageResampling = PILImageResampling.NEAREST,
566
+ ):
567
+ """
568
+ Resizes an annotation to a target size.
569
+
570
+ Args:
571
+ annotation (`Dict[str, Any]`):
572
+ The annotation dictionary.
573
+ orig_size (`Tuple[int, int]`):
574
+ The original size of the input image.
575
+ target_size (`Tuple[int, int]`):
576
+ The target size of the image, as returned by the preprocessing `resize` step.
577
+ threshold (`float`, *optional*, defaults to 0.5):
578
+ The threshold used to binarize the segmentation masks.
579
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
580
+ The resampling filter to use when resizing the masks.
581
+ """
582
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
583
+ ratio_height, ratio_width = ratios
584
+
585
+ new_annotation = {}
586
+ new_annotation["size"] = target_size
587
+
588
+ for key, value in annotation.items():
589
+ if key == "boxes":
590
+ boxes = value
591
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
592
+ new_annotation["boxes"] = scaled_boxes
593
+ elif key == "area":
594
+ area = value
595
+ scaled_area = area * (ratio_width * ratio_height)
596
+ new_annotation["area"] = scaled_area
597
+ elif key == "masks":
598
+ masks = value[:, None]
599
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
600
+ masks = masks.astype(np.float32)
601
+ masks = masks[:, 0] > threshold
602
+ new_annotation["masks"] = masks
603
+ elif key == "size":
604
+ new_annotation["size"] = target_size
605
+ else:
606
+ new_annotation[key] = value
607
+
608
+ return new_annotation
609
+
610
+
611
+ # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
612
+ def binary_mask_to_rle(mask):
613
+ """
614
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
615
+
616
+ Args:
617
+ mask (`torch.Tensor` or `numpy.array`):
618
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
619
+ segment_id or class_id.
620
+ Returns:
621
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
622
+ format.
623
+ """
624
+ if is_torch_tensor(mask):
625
+ mask = mask.numpy()
626
+
627
+ pixels = mask.flatten()
628
+ pixels = np.concatenate([[0], pixels, [0]])
629
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
630
+ runs[1::2] -= runs[::2]
631
+ return list(runs)
632
+
633
+
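For example (illustrative; not part of the file being added), the RLE alternates (1-indexed start, run length) pairs over the flattened mask:

import numpy as np

mask = np.array([[0, 1, 1], [0, 1, 0]])
binary_mask_to_rle(mask)  # -> [2, 2, 5, 1]: a run of 2 starting at pixel 2, a run of 1 at pixel 5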
634
+ # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
635
+ def convert_segmentation_to_rle(segmentation):
636
+ """
637
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
638
+
639
+ Args:
640
+ segmentation (`torch.Tensor` or `numpy.array`):
641
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
642
+ Returns:
643
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
644
+ """
645
+ segment_ids = torch.unique(segmentation)
646
+
647
+ run_length_encodings = []
648
+ for idx in segment_ids:
649
+ mask = torch.where(segmentation == idx, 1, 0)
650
+ rle = binary_mask_to_rle(mask)
651
+ run_length_encodings.append(rle)
652
+
653
+ return run_length_encodings
654
+
655
+
656
+ # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
657
+ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
658
+ """
659
+ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
660
+ `labels`.
661
+
662
+ Args:
663
+ masks (`torch.Tensor`):
664
+ A tensor of shape `(num_queries, height, width)`.
665
+ scores (`torch.Tensor`):
666
+ A tensor of shape `(num_queries)`.
667
+ labels (`torch.Tensor`):
668
+ A tensor of shape `(num_queries)`.
669
+ object_mask_threshold (`float`):
670
+ A number between 0 and 1 used to binarize the masks.
671
+ Raises:
672
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
673
+ Returns:
674
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
675
+ < `object_mask_threshold`.
676
+ """
677
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
678
+ raise ValueError("mask, scores and labels must have the same shape!")
679
+
680
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
681
+
682
+ return masks[to_keep], scores[to_keep], labels[to_keep]
683
+
684
+
685
+ # Copied from transformers.models.detr.image_processing_detr.check_segment_validity
686
+ def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
687
+ # Get the mask associated with the k class
688
+ mask_k = mask_labels == k
689
+ mask_k_area = mask_k.sum()
690
+
691
+ # Compute the area of all the stuff in query k
692
+ original_area = (mask_probs[k] >= mask_threshold).sum()
693
+ mask_exists = mask_k_area > 0 and original_area > 0
694
+
695
+ # Eliminate disconnected tiny segments
696
+ if mask_exists:
697
+ area_ratio = mask_k_area / original_area
698
+ if not area_ratio.item() > overlap_mask_area_threshold:
699
+ mask_exists = False
700
+
701
+ return mask_exists, mask_k
702
+
703
+
704
+ # Copied from transformers.models.detr.image_processing_detr.compute_segments
705
+ def compute_segments(
706
+ mask_probs,
707
+ pred_scores,
708
+ pred_labels,
709
+ mask_threshold: float = 0.5,
710
+ overlap_mask_area_threshold: float = 0.8,
711
+ label_ids_to_fuse: Optional[Set[int]] = None,
712
+ target_size: Tuple[int, int] = None,
713
+ ):
714
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
715
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
716
+
717
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
718
+ segments: List[Dict] = []
719
+
720
+ if target_size is not None:
721
+ mask_probs = nn.functional.interpolate(
722
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
723
+ )[0]
724
+
725
+ current_segment_id = 0
726
+
727
+ # Weigh each mask by its prediction score
728
+ mask_probs *= pred_scores.view(-1, 1, 1)
729
+ mask_labels = mask_probs.argmax(0) # [height, width]
730
+
731
+ # Keep track of instances of each class
732
+ stuff_memory_list: Dict[str, int] = {}
733
+ for k in range(pred_labels.shape[0]):
734
+ pred_class = pred_labels[k].item()
735
+ should_fuse = pred_class in label_ids_to_fuse
736
+
737
+ # Check if mask exists and large enough to be a segment
738
+ mask_exists, mask_k = check_segment_validity(
739
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
740
+ )
741
+
742
+ if mask_exists:
743
+ if pred_class in stuff_memory_list:
744
+ current_segment_id = stuff_memory_list[pred_class]
745
+ else:
746
+ current_segment_id += 1
747
+
748
+ # Add current object segment to final segmentation map
749
+ segmentation[mask_k] = current_segment_id
750
+ segment_score = round(pred_scores[k].item(), 6)
751
+ segments.append(
752
+ {
753
+ "id": current_segment_id,
754
+ "label_id": pred_class,
755
+ "was_fused": should_fuse,
756
+ "score": segment_score,
757
+ }
758
+ )
759
+ if should_fuse:
760
+ stuff_memory_list[pred_class] = current_segment_id
761
+
762
+ return segmentation, segments
763
+
764
+
765
+ class GroundingDinoImageProcessor(BaseImageProcessor):
766
+ r"""
767
+ Constructs a Grounding DINO image processor.
768
+
769
+ Args:
770
+ format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`):
771
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
772
+ do_resize (`bool`, *optional*, defaults to `True`):
773
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
774
+ overridden by the `do_resize` parameter in the `preprocess` method.
775
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
776
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
777
+ the `preprocess` method.
778
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
779
+ Resampling filter to use if resizing the image.
780
+ do_rescale (`bool`, *optional*, defaults to `True`):
781
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
782
+ `do_rescale` parameter in the `preprocess` method.
783
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
784
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
785
+ `preprocess` method. Controls whether to normalize the image. Can be overridden by the `do_normalize`
786
+ parameter in the `preprocess` method.
787
+ do_normalize (`bool`, *optional*, defaults to `True`):
788
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
789
+ method.
790
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
791
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
792
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
793
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
794
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
795
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
796
+ do_convert_annotations (`bool`, *optional*, defaults to `True`):
797
+ Controls whether to convert the annotations to the format expected by the DETR model. Converts the
798
+ bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
799
+ Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
800
+ do_pad (`bool`, *optional*, defaults to `True`):
801
+ Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be
802
+ overridden by the `do_pad` parameter in the `preprocess` method.
803
+ """
804
+
805
+ model_input_names = ["pixel_values", "pixel_mask"]
806
+
807
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
808
+ def __init__(
809
+ self,
810
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
811
+ do_resize: bool = True,
812
+ size: Dict[str, int] = None,
813
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
814
+ do_rescale: bool = True,
815
+ rescale_factor: Union[int, float] = 1 / 255,
816
+ do_normalize: bool = True,
817
+ image_mean: Union[float, List[float]] = None,
818
+ image_std: Union[float, List[float]] = None,
819
+ do_convert_annotations: Optional[bool] = None,
820
+ do_pad: bool = True,
821
+ **kwargs,
822
+ ) -> None:
823
+ if "pad_and_return_pixel_mask" in kwargs:
824
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
825
+
826
+ if "max_size" in kwargs:
827
+ logger.warning_once(
828
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
829
+ "Please specify in `size['longest_edge'] instead`.",
830
+ )
831
+ max_size = kwargs.pop("max_size")
832
+ else:
833
+ max_size = None if size is None else 1333
834
+
835
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
836
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
837
+
838
+ # Backwards compatibility
839
+ if do_convert_annotations is None:
840
+ do_convert_annotations = do_normalize
841
+
842
+ super().__init__(**kwargs)
843
+ self.format = format
844
+ self.do_resize = do_resize
845
+ self.size = size
846
+ self.resample = resample
847
+ self.do_rescale = do_rescale
848
+ self.rescale_factor = rescale_factor
849
+ self.do_normalize = do_normalize
850
+ self.do_convert_annotations = do_convert_annotations
851
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
852
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
853
+ self.do_pad = do_pad
854
+ self._valid_processor_keys = [
855
+ "images",
856
+ "annotations",
857
+ "return_segmentation_masks",
858
+ "masks_path",
859
+ "do_resize",
860
+ "size",
861
+ "resample",
862
+ "do_rescale",
863
+ "rescale_factor",
864
+ "do_normalize",
865
+ "do_convert_annotations",
866
+ "image_mean",
867
+ "image_std",
868
+ "do_pad",
869
+ "format",
870
+ "return_tensors",
871
+ "data_format",
872
+ "input_data_format",
873
+ ]
874
+
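A short usage sketch of the processor with the default settings documented above (illustrative; not part of the file being added):

import numpy as np
from PIL import Image

image_processor = GroundingDinoImageProcessor()
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
encoding = image_processor(images=image, return_tensors="pt")
# encoding.pixel_values has shape (1, 3, 800, 1066) after shortest-edge-800 resizing,
# and encoding.pixel_mask marks the unpadded region of each image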
875
+ @classmethod
876
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->GroundingDino
877
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
878
+ """
879
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
880
+ created using from_dict and kwargs e.g. `GroundingDinoImageProcessor.from_pretrained(checkpoint, size=600,
881
+ max_size=800)`
882
+ """
883
+ image_processor_dict = image_processor_dict.copy()
884
+ if "max_size" in kwargs:
885
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
886
+ if "pad_and_return_pixel_mask" in kwargs:
887
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
888
+ return super().from_dict(image_processor_dict, **kwargs)
889
+
890
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->GroundingDino
891
+ def prepare_annotation(
892
+ self,
893
+ image: np.ndarray,
894
+ target: Dict,
895
+ format: Optional[AnnotationFormat] = None,
896
+ return_segmentation_masks: bool = None,
897
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
898
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
899
+ ) -> Dict:
900
+ """
901
+ Prepare an annotation for feeding into GroundingDino model.
902
+ """
903
+ format = format if format is not None else self.format
904
+
905
+ if format == AnnotationFormat.COCO_DETECTION:
906
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
907
+ target = prepare_coco_detection_annotation(
908
+ image, target, return_segmentation_masks, input_data_format=input_data_format
909
+ )
910
+ elif format == AnnotationFormat.COCO_PANOPTIC:
911
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
912
+ target = prepare_coco_panoptic_annotation(
913
+ image,
914
+ target,
915
+ masks_path=masks_path,
916
+ return_masks=return_segmentation_masks,
917
+ input_data_format=input_data_format,
918
+ )
919
+ else:
920
+ raise ValueError(f"Format {format} is not supported.")
921
+ return target
922
+
923
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
924
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
925
+ logger.warning_once(
926
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
927
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
928
+ "does not return the image anymore.",
929
+ )
930
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
931
+ return image, target
932
+
933
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
934
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
935
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
936
+ return convert_coco_poly_to_mask(*args, **kwargs)
937
+
938
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
939
+ def prepare_coco_detection(self, *args, **kwargs):
940
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
941
+ return prepare_coco_detection_annotation(*args, **kwargs)
942
+
943
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
944
+ def prepare_coco_panoptic(self, *args, **kwargs):
945
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
946
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
947
+
948
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
949
+ def resize(
950
+ self,
951
+ image: np.ndarray,
952
+ size: Dict[str, int],
953
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
954
+ data_format: Optional[ChannelDimension] = None,
955
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
956
+ **kwargs,
957
+ ) -> np.ndarray:
958
+ """
959
+ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
960
+ int, smaller edge of the image will be matched to this number.
961
+
962
+ Args:
963
+ image (`np.ndarray`):
964
+ Image to resize.
965
+ size (`Dict[str, int]`):
966
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
967
+ `height` and `width`.
968
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
969
+ Resampling filter to use if resizing the image.
970
+ data_format (`str` or `ChannelDimension`, *optional*):
971
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
972
+ image is used.
973
+ input_data_format (`ChannelDimension` or `str`, *optional*):
974
+ The channel dimension format of the input image. If not provided, it will be inferred.
975
+ """
976
+ if "max_size" in kwargs:
977
+ logger.warning_once(
978
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
979
+ "Please specify in `size['longest_edge'] instead`.",
980
+ )
981
+ max_size = kwargs.pop("max_size")
982
+ else:
983
+ max_size = None
984
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
985
+ if "shortest_edge" in size and "longest_edge" in size:
986
+ size = get_resize_output_image_size(
987
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
988
+ )
989
+ elif "height" in size and "width" in size:
990
+ size = (size["height"], size["width"])
991
+ else:
992
+ raise ValueError(
993
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
994
+ f" {size.keys()}."
995
+ )
996
+ image = resize(
997
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
998
+ )
999
+ return image
1000
+
1001
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
1002
+ def resize_annotation(
1003
+ self,
1004
+ annotation,
1005
+ orig_size,
1006
+ size,
1007
+ resample: PILImageResampling = PILImageResampling.NEAREST,
1008
+ ) -> Dict:
1009
+ """
1010
+ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
1011
+ to this number.
1012
+ """
1013
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
1014
+
1015
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
1016
+ def rescale(
1017
+ self,
1018
+ image: np.ndarray,
1019
+ rescale_factor: float,
1020
+ data_format: Optional[Union[str, ChannelDimension]] = None,
1021
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1022
+ ) -> np.ndarray:
1023
+ """
1024
+ Rescale the image by the given factor. image = image * rescale_factor.
1025
+
1026
+ Args:
1027
+ image (`np.ndarray`):
1028
+ Image to rescale.
1029
+ rescale_factor (`float`):
1030
+ The value to use for rescaling.
1031
+ data_format (`str` or `ChannelDimension`, *optional*):
1032
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
1033
+ image is used. Can be one of:
1034
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1035
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1036
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1037
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
1038
+ one of:
1039
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1040
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1041
+ """
1042
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
1043
+
1044
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
1045
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
1046
+ """
1047
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
1048
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
1049
+ """
1050
+ return normalize_annotation(annotation, image_size=image_size)
1051
+
1052
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
1053
+ def _update_annotation_for_padded_image(
1054
+ self,
1055
+ annotation: Dict,
1056
+ input_image_size: Tuple[int, int],
1057
+ output_image_size: Tuple[int, int],
1058
+ padding,
1059
+ update_bboxes,
1060
+ ) -> Dict:
1061
+ """
1062
+ Update the annotation for a padded image.
1063
+ """
1064
+ new_annotation = {}
1065
+ new_annotation["size"] = output_image_size
1066
+
1067
+ for key, value in annotation.items():
1068
+ if key == "masks":
1069
+ masks = value
1070
+ masks = pad(
1071
+ masks,
1072
+ padding,
1073
+ mode=PaddingMode.CONSTANT,
1074
+ constant_values=0,
1075
+ input_data_format=ChannelDimension.FIRST,
1076
+ )
1077
+ masks = safe_squeeze(masks, 1)
1078
+ new_annotation["masks"] = masks
1079
+ elif key == "boxes" and update_bboxes:
1080
+ boxes = value
1081
+ boxes *= np.asarray(
1082
+ [
1083
+ input_image_size[1] / output_image_size[1],
1084
+ input_image_size[0] / output_image_size[0],
1085
+ input_image_size[1] / output_image_size[1],
1086
+ input_image_size[0] / output_image_size[0],
1087
+ ]
1088
+ )
1089
+ new_annotation["boxes"] = boxes
1090
+ elif key == "size":
1091
+ new_annotation["size"] = output_image_size
1092
+ else:
1093
+ new_annotation[key] = value
1094
+ return new_annotation
1095
+
1096
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
1097
+ def _pad_image(
1098
+ self,
1099
+ image: np.ndarray,
1100
+ output_size: Tuple[int, int],
1101
+ annotation: Optional[Dict[str, Any]] = None,
1102
+ constant_values: Union[float, Iterable[float]] = 0,
1103
+ data_format: Optional[ChannelDimension] = None,
1104
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1105
+ update_bboxes: bool = True,
1106
+ ) -> np.ndarray:
1107
+ """
1108
+ Pad an image with zeros to the given size.
1109
+ """
1110
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
1111
+ output_height, output_width = output_size
1112
+
1113
+ pad_bottom = output_height - input_height
1114
+ pad_right = output_width - input_width
1115
+ padding = ((0, pad_bottom), (0, pad_right))
1116
+ padded_image = pad(
1117
+ image,
1118
+ padding,
1119
+ mode=PaddingMode.CONSTANT,
1120
+ constant_values=constant_values,
1121
+ data_format=data_format,
1122
+ input_data_format=input_data_format,
1123
+ )
1124
+ if annotation is not None:
1125
+ annotation = self._update_annotation_for_padded_image(
1126
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
1127
+ )
1128
+ return padded_image, annotation
1129
+
1130
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
1131
+ def pad(
1132
+ self,
1133
+ images: List[np.ndarray],
1134
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1135
+ constant_values: Union[float, Iterable[float]] = 0,
1136
+ return_pixel_mask: bool = True,
1137
+ return_tensors: Optional[Union[str, TensorType]] = None,
1138
+ data_format: Optional[ChannelDimension] = None,
1139
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1140
+ update_bboxes: bool = True,
1141
+ ) -> BatchFeature:
1142
+ """
1143
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
1144
+ in the batch and optionally returns their corresponding pixel mask.
1145
+
1146
+ Args:
1147
+ images (List[`np.ndarray`]):
1148
+ Images to pad.
1149
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1150
+ Annotations to transform according to the padding that is applied to the images.
1151
+ constant_values (`float` or `Iterable[float]`, *optional*):
1152
+ The value to use for the padding if `mode` is `"constant"`.
1153
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
1154
+ Whether to return a pixel mask.
1155
+ return_tensors (`str` or `TensorType`, *optional*):
1156
+ The type of tensors to return. Can be one of:
1157
+ - Unset: Return a list of `np.ndarray`.
1158
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
1159
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
1160
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
1161
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
1162
+ data_format (`str` or `ChannelDimension`, *optional*):
1163
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
1164
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1165
+ The channel dimension format of the input image. If not provided, it will be inferred.
1166
+ update_bboxes (`bool`, *optional*, defaults to `True`):
1167
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
1168
+ bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
1169
+ format, the bounding boxes will not be updated.
1170
+ """
1171
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
1172
+
1173
+ annotation_list = annotations if annotations is not None else [None] * len(images)
1174
+ padded_images = []
1175
+ padded_annotations = []
1176
+ for image, annotation in zip(images, annotation_list):
1177
+ padded_image, padded_annotation = self._pad_image(
1178
+ image,
1179
+ pad_size,
1180
+ annotation,
1181
+ constant_values=constant_values,
1182
+ data_format=data_format,
1183
+ input_data_format=input_data_format,
1184
+ update_bboxes=update_bboxes,
1185
+ )
1186
+ padded_images.append(padded_image)
1187
+ padded_annotations.append(padded_annotation)
1188
+
1189
+ data = {"pixel_values": padded_images}
1190
+
1191
+ if return_pixel_mask:
1192
+ masks = [
1193
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
1194
+ for image in images
1195
+ ]
1196
+ data["pixel_mask"] = masks
1197
+
1198
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
1199
+
1200
+ if annotations is not None:
1201
+ encoded_inputs["labels"] = [
1202
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
1203
+ ]
1204
+
1205
+ return encoded_inputs
1206
+
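For illustration (not part of the file being added), padding two differently sized channels-first images batches them to the shared maximum size:

import numpy as np

image_processor = GroundingDinoImageProcessor()
images = [np.zeros((3, 100, 120)), np.zeros((3, 80, 150))]
batch = image_processor.pad(images, return_tensors="np")
# batch["pixel_values"].shape == (2, 3, 100, 150)
# batch["pixel_mask"][i] is 1 over each original image and 0 over the right/bottom padding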
1207
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
1208
+ def preprocess(
1209
+ self,
1210
+ images: ImageInput,
1211
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
1212
+ return_segmentation_masks: bool = None,
1213
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
1214
+ do_resize: Optional[bool] = None,
1215
+ size: Optional[Dict[str, int]] = None,
1216
+ resample=None, # PILImageResampling
1217
+ do_rescale: Optional[bool] = None,
1218
+ rescale_factor: Optional[Union[int, float]] = None,
1219
+ do_normalize: Optional[bool] = None,
1220
+ do_convert_annotations: Optional[bool] = None,
1221
+ image_mean: Optional[Union[float, List[float]]] = None,
1222
+ image_std: Optional[Union[float, List[float]]] = None,
1223
+ do_pad: Optional[bool] = None,
1224
+ format: Optional[Union[str, AnnotationFormat]] = None,
1225
+ return_tensors: Optional[Union[TensorType, str]] = None,
1226
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
1227
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1228
+ **kwargs,
1229
+ ) -> BatchFeature:
1230
+ """
1231
+ Preprocess an image or a batch of images so that it can be used by the model.
1232
+
1233
+ Args:
1234
+ images (`ImageInput`):
1235
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
1236
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
1237
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
1238
+ List of annotations associated with the image or batch of images. If annotation is for object
1239
+ detection, the annotations should be a dictionary with the following keys:
1240
+ - "image_id" (`int`): The image id.
1241
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
1242
+ dictionary. An image can have no annotations, in which case the list should be empty.
1243
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
1244
+ - "image_id" (`int`): The image id.
1245
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
1246
+ An image can have no segments, in which case the list should be empty.
1247
+ - "file_name" (`str`): The file name of the image.
1248
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
1249
+ Whether to return segmentation masks.
1250
+ masks_path (`str` or `pathlib.Path`, *optional*):
1251
+ Path to the directory containing the segmentation masks.
1252
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
1253
+ Whether to resize the image.
1254
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
1255
+ Size of the image after resizing.
1256
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
1257
+ Resampling filter to use when resizing the image.
1258
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
1259
+ Whether to rescale the image.
1260
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
1261
+ Rescale factor to use when rescaling the image.
1262
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
1263
+ Whether to normalize the image.
1264
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
1265
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
1266
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
1267
+ and in relative coordinates.
1268
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
1269
+ Mean to use when normalizing the image.
1270
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
1271
+ Standard deviation to use when normalizing the image.
1272
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
1273
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
1274
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
1275
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
1276
+ Format of the annotations.
1277
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
1278
+ Type of tensors to return. If `None`, will return the list of images.
1279
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
1280
+ The channel dimension format for the output image. Can be one of:
1281
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1282
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1283
+ - Unset: Use the channel dimension format of the input image.
1284
+ input_data_format (`ChannelDimension` or `str`, *optional*):
1285
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
1286
+ from the input image. Can be one of:
1287
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
1288
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
1289
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
1290
+ """
1291
+ if "pad_and_return_pixel_mask" in kwargs:
1292
+ logger.warning_once(
1293
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
1294
+ "use `do_pad` instead."
1295
+ )
1296
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
1297
+
1298
+ max_size = None
1299
+ if "max_size" in kwargs:
1300
+ logger.warning_once(
1301
+ "The `max_size` argument is deprecated and will be removed in a future version, use"
1302
+ " `size['longest_edge']` instead."
1303
+ )
1304
+ size = kwargs.pop("max_size")
1305
+
1306
+ do_resize = self.do_resize if do_resize is None else do_resize
1307
+ size = self.size if size is None else size
1308
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
1309
+ resample = self.resample if resample is None else resample
1310
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
1311
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
1312
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
1313
+ image_mean = self.image_mean if image_mean is None else image_mean
1314
+ image_std = self.image_std if image_std is None else image_std
1315
+ do_convert_annotations = (
1316
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
1317
+ )
1318
+ do_pad = self.do_pad if do_pad is None else do_pad
1319
+ format = self.format if format is None else format
1320
+
1321
+ images = make_list_of_images(images)
1322
+
1323
+ if not valid_images(images):
1324
+ raise ValueError(
1325
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
1326
+ "torch.Tensor, tf.Tensor or jax.ndarray."
1327
+ )
1328
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
1329
+
1330
+ # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
1331
+ validate_preprocess_arguments(
1332
+ do_rescale=do_rescale,
1333
+ rescale_factor=rescale_factor,
1334
+ do_normalize=do_normalize,
1335
+ image_mean=image_mean,
1336
+ image_std=image_std,
1337
+ do_resize=do_resize,
1338
+ size=size,
1339
+ resample=resample,
1340
+ )
1341
+
1342
+ if annotations is not None and isinstance(annotations, dict):
1343
+ annotations = [annotations]
1344
+
1345
+ if annotations is not None and len(images) != len(annotations):
1346
+ raise ValueError(
1347
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
1348
+ )
1349
+
1350
+ format = AnnotationFormat(format)
1351
+ if annotations is not None:
1352
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
1353
+
1354
+ if (
1355
+ masks_path is not None
1356
+ and format == AnnotationFormat.COCO_PANOPTIC
1357
+ and not isinstance(masks_path, (pathlib.Path, str))
1358
+ ):
1359
+ raise ValueError(
1360
+ "The path to the directory containing the mask PNG files should be provided as a"
1361
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
1362
+ )
1363
+
1364
+ # All transformations expect numpy arrays
1365
+ images = [to_numpy_array(image) for image in images]
1366
+
1367
+ if is_scaled_image(images[0]) and do_rescale:
1368
+ logger.warning_once(
1369
+ "It looks like you are trying to rescale already rescaled images. If the input"
1370
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
1371
+ )
1372
+
1373
+ if input_data_format is None:
1374
+ # We assume that all images have the same channel dimension format.
1375
+ input_data_format = infer_channel_dimension_format(images[0])
1376
+
1377
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
1378
+ if annotations is not None:
1379
+ prepared_images = []
1380
+ prepared_annotations = []
1381
+ for image, target in zip(images, annotations):
1382
+ target = self.prepare_annotation(
1383
+ image,
1384
+ target,
1385
+ format,
1386
+ return_segmentation_masks=return_segmentation_masks,
1387
+ masks_path=masks_path,
1388
+ input_data_format=input_data_format,
1389
+ )
1390
+ prepared_images.append(image)
1391
+ prepared_annotations.append(target)
1392
+ images = prepared_images
1393
+ annotations = prepared_annotations
1394
+ del prepared_images, prepared_annotations
1395
+
1396
+ # transformations
1397
+ if do_resize:
1398
+ if annotations is not None:
1399
+ resized_images, resized_annotations = [], []
1400
+ for image, target in zip(images, annotations):
1401
+ orig_size = get_image_size(image, input_data_format)
1402
+ resized_image = self.resize(
1403
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
1404
+ )
1405
+ resized_annotation = self.resize_annotation(
1406
+ target, orig_size, get_image_size(resized_image, input_data_format)
1407
+ )
1408
+ resized_images.append(resized_image)
1409
+ resized_annotations.append(resized_annotation)
1410
+ images = resized_images
1411
+ annotations = resized_annotations
1412
+ del resized_images, resized_annotations
1413
+ else:
1414
+ images = [
1415
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
1416
+ for image in images
1417
+ ]
1418
+
1419
+ if do_rescale:
1420
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
1421
+
1422
+ if do_normalize:
1423
+ images = [
1424
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
1425
+ ]
1426
+
1427
+ if do_convert_annotations and annotations is not None:
1428
+ annotations = [
1429
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
1430
+ for annotation, image in zip(annotations, images)
1431
+ ]
1432
+
1433
+ if do_pad:
1434
+ # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
1435
+ encoded_inputs = self.pad(
1436
+ images,
1437
+ annotations=annotations,
1438
+ return_pixel_mask=True,
1439
+ data_format=data_format,
1440
+ input_data_format=input_data_format,
1441
+ update_bboxes=do_convert_annotations,
1442
+ return_tensors=return_tensors,
1443
+ )
1444
+ else:
1445
+ images = [
1446
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
1447
+ for image in images
1448
+ ]
1449
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
1450
+ if annotations is not None:
1451
+ encoded_inputs["labels"] = [
1452
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
1453
+ ]
1454
+
1455
+ return encoded_inputs
1456
+
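+ # Illustrative usage of `preprocess` (a sketch, not part of the library source): the checkpoint name,
+ # image array and annotation values below are assumptions chosen for the example.
+ #
+ #     from transformers import AutoImageProcessor
+ #     import numpy as np
+ #
+ #     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+ #     annotation = {
+ #         "image_id": 0,
+ #         "annotations": [{"bbox": [10, 20, 200, 150], "category_id": 1, "area": 30000, "iscrowd": 0}],
+ #     }
+ #     image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
+ #     encoding = image_processor(images=image, annotations=annotation, format="coco_detection", return_tensors="pt")
+ #     # encoding contains "pixel_values", "pixel_mask" and a "labels" list with normalized boxes
+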
1457
+ # Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection with OwlViT->GroundingDino
1458
+ def post_process_object_detection(
1459
+ self, outputs, threshold: float = 0.1, target_sizes: Union[TensorType, List[Tuple]] = None
1460
+ ):
1461
+ """
1462
+ Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
1463
+ bottom_right_x, bottom_right_y) format.
1464
+
1465
+ Args:
1466
+ outputs ([`GroundingDinoObjectDetectionOutput`]):
1467
+ Raw outputs of the model.
1468
+ threshold (`float`, *optional*):
1469
+ Score threshold to keep object detection predictions.
1470
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
1471
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
1472
+ `(height, width)` of each image in the batch. If unset, predictions will not be resized.
1473
+ Returns:
1474
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
1475
+ in the batch as predicted by the model.
1476
+ """
1477
+ # TODO: (amy) add support for other frameworks
1478
+ logits, boxes = outputs.logits, outputs.pred_boxes
1479
+
1480
+ if target_sizes is not None:
1481
+ if len(logits) != len(target_sizes):
1482
+ raise ValueError(
1483
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
1484
+ )
1485
+
1486
+ probs = torch.max(logits, dim=-1)
1487
+ scores = torch.sigmoid(probs.values)
1488
+ labels = probs.indices
1489
+
1490
+ # Convert to [x0, y0, x1, y1] format
1491
+ boxes = center_to_corners_format(boxes)
1492
+
1493
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
1494
+ if target_sizes is not None:
1495
+ if isinstance(target_sizes, List):
1496
+ img_h = torch.Tensor([i[0] for i in target_sizes])
1497
+ img_w = torch.Tensor([i[1] for i in target_sizes])
1498
+ else:
1499
+ img_h, img_w = target_sizes.unbind(1)
1500
+
1501
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
1502
+ boxes = boxes * scale_fct[:, None, :]
1503
+
1504
+ results = []
1505
+ for s, l, b in zip(scores, labels, boxes):
1506
+ score = s[s > threshold]
1507
+ label = l[s > threshold]
1508
+ box = b[s > threshold]
1509
+ results.append({"scores": score, "labels": label, "boxes": box})
1510
+
1511
+ return results
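+
+ # Illustrative post-processing sketch (an assumption-based example, not part of the library source):
+ #
+ #     # `outputs` is a raw `GroundingDinoObjectDetectionOutput` from a forward pass and `image` a PIL image
+ #     target_sizes = [image.size[::-1]]  # PIL size is (width, height), so reverse to (height, width)
+ #     results = image_processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=target_sizes)
+ #     for result in results:
+ #         print(result["scores"].shape, result["labels"].shape, result["boxes"].shape)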
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/modeling_grounding_dino.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/grounding_dino/processing_grounding_dino.py ADDED
@@ -0,0 +1,228 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Grounding DINO.
17
+ """
18
+
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ from ...image_processing_utils import BatchFeature
22
+ from ...image_transforms import center_to_corners_format
23
+ from ...image_utils import ImageInput
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
26
+ from ...utils import TensorType, is_torch_available
27
+
28
+
29
+ if is_torch_available():
30
+ import torch
31
+
32
+
33
+ def get_phrases_from_posmap(posmaps, input_ids):
34
+ """Get token ids of phrases from posmaps and input_ids.
35
+
36
+ Args:
37
+ posmaps (`torch.BoolTensor` of shape `(num_boxes, hidden_size)`):
38
+ A boolean tensor of text-thresholded logits related to the detected bounding boxes.
39
+ input_ids (`torch.LongTensor` of shape `(sequence_length, )`):
40
+ A tensor of token ids.
41
+ """
42
+ left_idx = 0
43
+ right_idx = posmaps.shape[-1] - 1
44
+
45
+ # Avoiding altering the input tensor
46
+ posmaps = posmaps.clone()
47
+
48
+ posmaps[:, 0 : left_idx + 1] = False
49
+ posmaps[:, right_idx:] = False
50
+
51
+ token_ids = []
52
+ for posmap in posmaps:
53
+ non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
54
+ token_ids.append([input_ids[i] for i in non_zero_idx])
55
+
56
+ return token_ids
57
+
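+ # A small worked example of `get_phrases_from_posmap` (sketch with made-up values):
+ #
+ #     posmaps = torch.tensor([[False, True, True, False, False]])  # one box, sequence length 5
+ #     input_ids = torch.tensor([101, 1037, 4937, 1012, 102])       # assumed token ids for "[CLS] a cat . [SEP]"
+ #     get_phrases_from_posmap(posmaps, input_ids)
+ #     # -> [[tensor(1037), tensor(4937)]]: position 0 ([CLS]) and the last position are masked out
+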
58
+
59
+ class GroundingDinoProcessor(ProcessorMixin):
60
+ r"""
61
+ Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a
62
+ single processor.
63
+
64
+ [`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and
65
+ [`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`]
66
+ for more information.
67
+
68
+ Args:
69
+ image_processor (`GroundingDinoImageProcessor`):
70
+ An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input.
71
+ tokenizer (`AutoTokenizer`):
72
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
73
+ """
74
+
75
+ attributes = ["image_processor", "tokenizer"]
76
+ image_processor_class = "GroundingDinoImageProcessor"
77
+ tokenizer_class = "AutoTokenizer"
78
+
79
+ def __init__(self, image_processor, tokenizer):
80
+ super().__init__(image_processor, tokenizer)
81
+
82
+ def __call__(
83
+ self,
84
+ images: ImageInput = None,
85
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
86
+ add_special_tokens: bool = True,
87
+ padding: Union[bool, str, PaddingStrategy] = False,
88
+ truncation: Union[bool, str, TruncationStrategy] = None,
89
+ max_length: Optional[int] = None,
90
+ stride: int = 0,
91
+ pad_to_multiple_of: Optional[int] = None,
92
+ return_attention_mask: Optional[bool] = None,
93
+ return_overflowing_tokens: bool = False,
94
+ return_special_tokens_mask: bool = False,
95
+ return_offsets_mapping: bool = False,
96
+ return_token_type_ids: bool = True,
97
+ return_length: bool = False,
98
+ verbose: bool = True,
99
+ return_tensors: Optional[Union[str, TensorType]] = None,
100
+ **kwargs,
101
+ ) -> BatchEncoding:
102
+ """
103
+ This method uses [`GroundingDinoImageProcessor.__call__`] to prepare image(s) for the model, and
104
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
105
+
106
+ Please refer to the docstring of the above two methods for more information.
107
+ """
108
+ if images is None and text is None:
109
+ raise ValueError("You have to specify either images or text.")
110
+
111
+ # Get only text
112
+ if images is not None:
113
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
114
+ else:
115
+ encoding_image_processor = BatchFeature()
116
+
117
+ if text is not None:
118
+ text_encoding = self.tokenizer(
119
+ text=text,
120
+ add_special_tokens=add_special_tokens,
121
+ padding=padding,
122
+ truncation=truncation,
123
+ max_length=max_length,
124
+ stride=stride,
125
+ pad_to_multiple_of=pad_to_multiple_of,
126
+ return_attention_mask=return_attention_mask,
127
+ return_overflowing_tokens=return_overflowing_tokens,
128
+ return_special_tokens_mask=return_special_tokens_mask,
129
+ return_offsets_mapping=return_offsets_mapping,
130
+ return_token_type_ids=return_token_type_ids,
131
+ return_length=return_length,
132
+ verbose=verbose,
133
+ return_tensors=return_tensors,
134
+ **kwargs,
135
+ )
136
+ else:
137
+ text_encoding = BatchEncoding()
138
+
139
+ text_encoding.update(encoding_image_processor)
140
+
141
+ return text_encoding
142
+
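+ # Illustrative processor usage (a sketch; the checkpoint name, file name and prompt are assumptions):
+ #
+ #     from transformers import AutoProcessor
+ #     from PIL import Image
+ #
+ #     processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
+ #     image = Image.open("cats.png")  # hypothetical local file
+ #     inputs = processor(images=image, text="a cat. a remote control.", return_tensors="pt")
+ #     # inputs combines tokenizer outputs (input_ids, attention_mask, token_type_ids) with pixel_values
+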
143
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
144
+ def batch_decode(self, *args, **kwargs):
145
+ """
146
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
147
+ refer to the docstring of this method for more information.
148
+ """
149
+ return self.tokenizer.batch_decode(*args, **kwargs)
150
+
151
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
152
+ def decode(self, *args, **kwargs):
153
+ """
154
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
155
+ the docstring of this method for more information.
156
+ """
157
+ return self.tokenizer.decode(*args, **kwargs)
158
+
159
+ @property
160
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
161
+ def model_input_names(self):
162
+ tokenizer_input_names = self.tokenizer.model_input_names
163
+ image_processor_input_names = self.image_processor.model_input_names
164
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
165
+
166
+ def post_process_grounded_object_detection(
167
+ self,
168
+ outputs,
169
+ input_ids,
170
+ box_threshold: float = 0.25,
171
+ text_threshold: float = 0.25,
172
+ target_sizes: Union[TensorType, List[Tuple]] = None,
173
+ ):
174
+ """
175
+ Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
176
+ bottom_right_x, bottom_right_y) format and get the associated text label.
177
+
178
+ Args:
179
+ outputs ([`GroundingDinoObjectDetectionOutput`]):
180
+ Raw outputs of the model.
181
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
182
+ The token ids of the input text.
183
+ box_threshold (`float`, *optional*, defaults to 0.25):
184
+ Score threshold to keep object detection predictions.
185
+ text_threshold (`float`, *optional*, defaults to 0.25):
186
+ Score threshold to keep text detection predictions.
187
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
188
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
189
+ `(height, width)` of each image in the batch. If unset, predictions will not be resized.
190
+ Returns:
191
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
192
+ in the batch as predicted by the model.
193
+ """
194
+ logits, boxes = outputs.logits, outputs.pred_boxes
195
+
196
+ if target_sizes is not None:
197
+ if len(logits) != len(target_sizes):
198
+ raise ValueError(
199
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
200
+ )
201
+
202
+ probs = torch.sigmoid(logits) # (batch_size, num_queries, 256)
203
+ scores = torch.max(probs, dim=-1)[0] # (batch_size, num_queries)
204
+
205
+ # Convert to [x0, y0, x1, y1] format
206
+ boxes = center_to_corners_format(boxes)
207
+
208
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
209
+ if target_sizes is not None:
210
+ if isinstance(target_sizes, List):
211
+ img_h = torch.Tensor([i[0] for i in target_sizes])
212
+ img_w = torch.Tensor([i[1] for i in target_sizes])
213
+ else:
214
+ img_h, img_w = target_sizes.unbind(1)
215
+
216
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
217
+ boxes = boxes * scale_fct[:, None, :]
218
+
219
+ results = []
220
+ for idx, (s, b, p) in enumerate(zip(scores, boxes, probs)):
221
+ score = s[s > box_threshold]
222
+ box = b[s > box_threshold]
223
+ prob = p[s > box_threshold]
224
+ label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx])
225
+ label = self.batch_decode(label_ids)
226
+ results.append({"scores": score, "labels": label, "boxes": box})
227
+
228
+ return results
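+
+ # End-to-end sketch for grounded post-processing (assumption-based example, not library source):
+ #
+ #     with torch.no_grad():
+ #         outputs = model(**inputs)  # a `GroundingDinoForObjectDetection` forward pass
+ #     results = processor.post_process_grounded_object_detection(
+ #         outputs, inputs.input_ids, box_threshold=0.35, text_threshold=0.25, target_sizes=[image.size[::-1]]
+ #     )
+ #     # each entry has "scores", decoded text "labels" and corner-format "boxes"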
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__init__.py ADDED
@@ -0,0 +1,74 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_idefics2": ["Idefics2Config"]}
20
+
21
+
22
+ try:
23
+ if not is_vision_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["image_processing_idefics2"] = ["Idefics2ImageProcessor"]
29
+
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_idefics2"] = [
38
+ "IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "Idefics2ForConditionalGeneration",
40
+ "Idefics2PreTrainedModel",
41
+ "Idefics2Model",
42
+ ]
43
+ _import_structure["processing_idefics2"] = ["Idefics2Processor"]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_idefics2 import Idefics2Config
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .image_processing_idefics2 import Idefics2ImageProcessor
55
+
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .modeling_idefics2 import (
63
+ IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST,
64
+ Idefics2ForConditionalGeneration,
65
+ Idefics2Model,
66
+ Idefics2PreTrainedModel,
67
+ )
68
+ from .processing_idefics2 import Idefics2Processor
69
+
70
+
71
+ else:
72
+ import sys
73
+
74
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/__pycache__/processing_idefics2.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/configuration_idefics2.py ADDED
@@ -0,0 +1,262 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Idefics2 model configuration"""
15
+
16
+ import os
17
+ from typing import Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+ from ..auto import CONFIG_MAPPING
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class Idefics2VisionConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a
30
+ Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
32
+ [google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
33
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimensionality of the encoder layers and the pooler layer.
41
+ intermediate_size (`int`, *optional*, defaults to 3072):
42
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_channels (`int`, *optional*, defaults to 3):
48
+ Number of channels in the input images.
49
+ image_size (`int`, *optional*, defaults to 224):
50
+ The size (resolution) of each image.
51
+ patch_size (`int`, *optional*, defaults to 32):
52
+ The size (resolution) of each patch.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
56
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
57
+ The epsilon used by the layer normalization layers.
58
+ attention_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+ initializer_range (`float`, *optional*, defaults to 0.02):
61
+ The standard deviation for initializing all weight matrices in the model.
62
+
63
+ Example:
64
+
65
+ ```python
66
+ >>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
67
+ >>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
68
+
69
+ >>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
70
+ >>> configuration = Idefics2VisionConfig()
71
+
72
+ >>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
73
+ >>> model = Idefics2VisionTransformer(configuration)
74
+
75
+ >>> # Accessing the model configuration
76
+ >>> configuration = model.config
77
+ ```"""
78
+
79
+ model_type = "idefics2"
80
+
81
+ def __init__(
82
+ self,
83
+ hidden_size=768,
84
+ intermediate_size=3072,
85
+ num_hidden_layers=12,
86
+ num_attention_heads=12,
87
+ num_channels=3,
88
+ image_size=224,
89
+ patch_size=32,
90
+ hidden_act="gelu_pytorch_tanh",
91
+ layer_norm_eps=1e-6,
92
+ attention_dropout=0.0,
93
+ initializer_range=0.02,
94
+ **kwargs,
95
+ ):
96
+ super().__init__(**kwargs)
97
+
98
+ self.hidden_size = hidden_size
99
+ self.intermediate_size = intermediate_size
100
+ self.num_hidden_layers = num_hidden_layers
101
+ self.num_attention_heads = num_attention_heads
102
+ self.num_channels = num_channels
103
+ self.patch_size = patch_size
104
+ self.image_size = image_size
105
+ self.attention_dropout = attention_dropout
106
+ self.layer_norm_eps = layer_norm_eps
107
+ self.hidden_act = hidden_act
108
+ self.initializer_range = initializer_range
109
+
110
+ @classmethod
111
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
112
+ cls._set_token_in_kwargs(kwargs)
113
+
114
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
115
+
116
+ # get the vision config dict if we are loading from Idefics2Config
117
+ if config_dict.get("model_type") == "idefics2":
118
+ config_dict = config_dict["vision_config"]
119
+
120
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
121
+ logger.warning(
122
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
123
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
124
+ )
125
+
126
+ return cls.from_dict(config_dict, **kwargs)
127
+
128
+
129
+ class Idefics2PerceiverConfig(PretrainedConfig):
130
+ r"""
131
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
132
+ documentation from [`PretrainedConfig`] for more information.
133
+
134
+ Args:
135
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
136
+ The non-linear activation function (function or string) in the perceiver block.
137
+ resampler_n_latents (`int`, *optional*, defaults to 64):
138
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
139
+ resampler_depth (`int`, *optional*, defaults to 3):
140
+ Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (<= 3).
141
+ resampler_n_heads (`int`, *optional*, defaults to 16):
142
+ Number of heads in each Transformer block (for multi-headed self-attention).
143
+ resampler_head_dim (`int`, *optional*, defaults to 96):
144
+ Dimensionality of each head projection in the Transformer block.
145
+ num_key_value_heads (`int`, *optional*, defaults to 4):
146
+ Number of key-value heads in the perceiver attention block.
147
+ attention_dropout (`float`, *optional*, defaults to 0.0):
148
+ The dropout ratio for the attention probabilities.
149
+ """
150
+
151
+ model_type = "idefics2"
152
+
153
+ def __init__(
154
+ self,
155
+ hidden_act="silu",
156
+ resampler_n_latents=64,
157
+ resampler_depth=3,
158
+ resampler_n_heads=16,
159
+ resampler_head_dim=96,
160
+ num_key_value_heads=4,
161
+ attention_dropout=0.0,
162
+ **kwargs,
163
+ ):
164
+ self.hidden_act = hidden_act
165
+ self.resampler_n_latents = resampler_n_latents
166
+ self.resampler_depth = resampler_depth
167
+ self.resampler_n_heads = resampler_n_heads
168
+ self.num_key_value_heads = num_key_value_heads
169
+ self.resampler_head_dim = resampler_head_dim
170
+ self.attention_dropout = attention_dropout
171
+ if self.num_key_value_heads > self.resampler_n_heads:
172
+ raise ValueError(
173
+ f"num_key_value_heads={self.num_key_value_heads} must be less than or equal to"
174
+ f" resampler_n_heads={self.resampler_n_heads}"
175
+ )
176
+ super().__init__(**kwargs)
177
+
178
+
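+ # Minimal instantiation sketch (the values shown are the defaults from the signature above):
+ #
+ #     perceiver_config = Idefics2PerceiverConfig(resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16)
+ #     config = Idefics2Config(perceiver_config=perceiver_config)
+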
179
+ class Idefics2Config(PretrainedConfig):
180
+ r"""
181
+ This is the configuration class to store the configuration of a [`Idefics2Model`]. It is used to instantiate a
182
+ Idefics2 model according to the specified arguments, defining the model architecture. Instantiating a
183
+ configuration with the defaults will yield a similar configuration to that of the model of the Idefics2
184
+ [HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b) architecture.
185
+
186
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
187
+ documentation from [`PretrainedConfig`] for more information.
188
+
189
+ Args:
190
+ use_cache (`bool`, *optional*, defaults to `True`):
191
+ Whether or not the model should cache the key/value pairs of the attention mechanism.
192
+ image_token_id (`int`, *optional*, defaults to 32001):
193
+ The id of the "image" token.
194
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
195
+ Whether or not to tie the word embeddings with the token embeddings.
196
+ vision_config (`Idefics2VisionConfig` or `dict`, *optional*):
197
+ Custom vision config or dict
198
+ perceiver_config (`Idefics2PerceiverConfig` or `dict`, *optional*):
199
+ Custom perceiver config or dict
200
+ text_config (`MistralConfig` or `dict`, *optional*):
201
+ Custom text config or dict for the text model
202
+
203
+ Example:
204
+ ```python
205
+ >>> from transformers import Idefics2Model, Idefics2Config
206
+ >>> # Initializing configuration
207
+ >>> configuration = Idefics2Config()
208
+ >>> # Initializing a model from the configuration
209
+ >>> model = Idefics2Model(configuration)
210
+ >>> # Accessing the model configuration
211
+ >>> configuration = model.config
212
+ ```"""
213
+
214
+ model_type = "idefics2"
215
+ is_composition = True
216
+
217
+ def __init__(
218
+ self,
219
+ use_cache=True,
220
+ image_token_id=32_001,
221
+ tie_word_embeddings=False,
222
+ vision_config=None,
223
+ perceiver_config=None,
224
+ text_config=None,
225
+ **kwargs,
226
+ ):
227
+ self.image_token_id = image_token_id
228
+ self.use_cache = use_cache
229
+ self.tie_word_embeddings = tie_word_embeddings
230
+
231
+ if perceiver_config is None:
232
+ self.perceiver_config = Idefics2PerceiverConfig()
233
+ logger.info("perceiver_config is None, using default perceiver config")
234
+ elif isinstance(perceiver_config, dict):
235
+ self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config)
236
+ elif isinstance(perceiver_config, Idefics2PerceiverConfig):
237
+ self.perceiver_config = perceiver_config
238
+
239
+ if vision_config is None:
240
+ self.vision_config = Idefics2VisionConfig()
241
+ logger.info("vision_config is None, using default vision config")
242
+ elif isinstance(vision_config, dict):
243
+ self.vision_config = Idefics2VisionConfig(**vision_config)
244
+ elif isinstance(vision_config, Idefics2VisionConfig):
245
+ self.vision_config = vision_config
246
+
247
+ if isinstance(text_config, dict):
248
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "mistral"
249
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
250
+ elif text_config is None:
251
+ logger.info("text_config is None, using default text config")
252
+ text_config = CONFIG_MAPPING["mistral"](
253
+ max_position_embeddings=4096 * 8,
254
+ rms_norm_eps=1e-5,
255
+ # None in the original configuration_mistral, we set it to the unk_token_id
256
+ pad_token_id=0,
257
+ tie_word_embeddings=False,
258
+ )
259
+
260
+ self.text_config = text_config
261
+
262
+ super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/convert_idefics2_weights_to_hf.py ADDED
@@ -0,0 +1,185 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import copy
17
+
18
+ import torch
19
+ from accelerate import init_empty_weights
20
+
21
+ from transformers import (
22
+ AutoConfig,
23
+ AutoModelForCausalLM,
24
+ AutoTokenizer,
25
+ Idefics2Config,
26
+ Idefics2ForConditionalGeneration,
27
+ Idefics2ImageProcessor,
28
+ Idefics2Processor,
29
+ MistralConfig,
30
+ )
31
+
32
+
33
+ EPILOG_TXT = """Example:
34
+ python transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py --original_model_id HuggingFaceM4/idefics2-8b --output_hub_path org/idefics2
35
+ """
36
+
37
+
38
+ KEYS_TO_MODIFY_MAPPING = {
39
+ "lm_head.weight": "lm_head.linear.weight",
40
+ "model.layers": "model.text_model.layers",
41
+ "model.norm": "model.text_model.norm",
42
+ "model.perceiver_resampler": "model.connector.perceiver_resampler",
43
+ "model.modality_projection": "model.connector.modality_projection",
44
+ }
45
+
46
+
47
+ WEIGHTS_TO_MERGE_MAPPING = (
48
+ # (weights to merge in merging order), (new weight name)
49
+ (
50
+ ("model.embed_tokens.weight", "model.embed_tokens.additional_embedding.weight"),
51
+ "model.text_model.embed_tokens.weight",
52
+ ),
53
+ (("lm_head.linear.weight", "additional_fc.weight"), "lm_head.weight"),
54
+ )
55
+
56
+
57
+ def convert_state_dict_to_hf(state_dict):
58
+ new_state_dict = {}
59
+ for key, value in state_dict.items():
60
+ if key.endswith(".inv_freq"):
61
+ continue
62
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
63
+ if key_to_modify in key:
64
+ key = key.replace(key_to_modify, new_key)
65
+
66
+ new_state_dict[key] = value
67
+ return new_state_dict
68
+
69
+
70
+ def merge_weights(state_dict):
71
+ new_state_dict = copy.deepcopy(state_dict)
72
+
73
+ # Merge the weights
74
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
75
+ for weight in weights_to_merge:
76
+ assert weight in state_dict, f"Weight {weight} is missing in the state dict"
77
+ if new_weight_name not in new_state_dict:
78
+ new_state_dict[new_weight_name] = [state_dict[weight]]
79
+ else:
80
+ new_state_dict[new_weight_name].append(state_dict[weight])
81
+ new_state_dict[new_weight_name] = torch.cat(new_state_dict[new_weight_name], dim=0)
82
+
83
+ # Remove the weights that were merged
84
+ for weights_to_merge, new_weight_name in WEIGHTS_TO_MERGE_MAPPING:
85
+ for weight in weights_to_merge:
86
+ if weight in new_state_dict and weight != new_weight_name:
87
+ new_state_dict.pop(weight)
88
+
89
+ return new_state_dict
90
+
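+ # Conceptual sketch of the merge (hypothetical shapes): the base embedding matrix and the
+ # additional-vocabulary rows are concatenated along dim 0, e.g.
+ #
+ #     base = torch.zeros(32000, 4096)   # "model.embed_tokens.weight"
+ #     extra = torch.zeros(3, 4096)      # "model.embed_tokens.additional_embedding.weight"
+ #     merged = torch.cat([base, extra], dim=0)  # -> (32003, 4096), stored as "model.text_model.embed_tokens.weight"
+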
91
+
92
+ def get_config(checkpoint):
93
+ if checkpoint == "HuggingFaceM4/idefics2":
94
+ # We load the config then recreate to use the text_config
95
+ config = AutoConfig.from_pretrained(checkpoint)
96
+ text_config = MistralConfig(
97
+ vocab_size=config.vocab_size + config.additional_vocab_size,
98
+ hidden_size=config.hidden_size,
99
+ intermediate_size=config.intermediate_size,
100
+ num_hidden_layers=config.num_hidden_layers,
101
+ num_attention_heads=config.num_attention_heads,
102
+ num_key_value_heads=config.num_key_value_heads,
103
+ hidden_act=config.hidden_act,
104
+ max_position_embeddings=config.max_position_embeddings,
105
+ initializer_range=config.initializer_range,
106
+ rms_norm_eps=config.rms_norm_eps,
107
+ tie_word_embeddings=config.tie_word_embeddings,
108
+ rope_theta=config.rope_theta,
109
+ sliding_window=config.sliding_window,
110
+ attention_dropout=config.attention_dropout,
111
+ pad_token_id=config.pad_token_id,
112
+ bos_token_id=config.bos_token_id,
113
+ eos_token_id=config.eos_token_id,
114
+ )
115
+ perceiver_config = config.perceiver_config.to_dict()
116
+ config = Idefics2Config(
117
+ text_config=text_config.to_dict(),
118
+ vision_config=config.vision_config,
119
+ perceiver_config=perceiver_config,
120
+ use_cache=config.use_cache,
121
+ image_token_id=config.image_token_id,
122
+ tie_word_embeddings=config.tie_word_embeddings,
123
+ )
124
+ return config
125
+
126
+ return AutoConfig.from_pretrained(checkpoint)
127
+
128
+
129
+ def convert_idefics2_hub_to_hf(original_model_id, output_hub_path, push_to_hub):
130
+ # The original model maps to AutoModelForCausalLM, converted we map to Idefics2ForConditionalGeneration
131
+ original_model = AutoModelForCausalLM.from_pretrained(original_model_id, trust_remote_code=True)
132
+ # The original model doesn't use the idefics2 processing objects
133
+ image_seq_len = original_model.config.perceiver_config.resampler_n_latents
134
+ image_processor = Idefics2ImageProcessor()
135
+ tokenizer = AutoTokenizer.from_pretrained(original_model_id)
136
+ processor = Idefics2Processor(
137
+ image_processor=image_processor,
138
+ tokenizer=tokenizer,
139
+ image_seq_len=image_seq_len,
140
+ )
141
+ state_dict = original_model.state_dict()
142
+ state_dict = convert_state_dict_to_hf(state_dict)
143
+
144
+ # Merge weights
145
+ state_dict = merge_weights(state_dict)
146
+
147
+ config = get_config(original_model_id)
148
+
149
+ with init_empty_weights():
150
+ model = Idefics2ForConditionalGeneration(config)
151
+
152
+ model.load_state_dict(state_dict, strict=True, assign=True)
153
+
154
+ model.save_pretrained(output_hub_path)
155
+ processor.save_pretrained(output_hub_path)
156
+
157
+ if push_to_hub:
158
+ model.push_to_hub(output_hub_path, private=True)
159
+ processor.push_to_hub(output_hub_path, private=True)
160
+
161
+
162
+ def main():
163
+ parser = argparse.ArgumentParser(
164
+ epilog=EPILOG_TXT,
165
+ formatter_class=argparse.RawDescriptionHelpFormatter,
166
+ )
167
+ parser.add_argument(
168
+ "--original_model_id",
169
+ help="Hub location of the text model",
170
+ )
171
+ parser.add_argument(
172
+ "--output_hub_path",
173
+ help="Location on the hub of the converted model",
174
+ )
175
+ parser.add_argument(
176
+ "--push_to_hub",
177
+ action="store_true",
178
+ help="If set, the model will be pushed to the hub after conversion.",
179
+ )
180
+ args = parser.parse_args()
181
+ convert_idefics2_hub_to_hf(args.original_model_id, args.output_hub_path, args.push_to_hub)
182
+
183
+
184
+ if __name__ == "__main__":
185
+ main()
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/image_processing_idefics2.py ADDED
@@ -0,0 +1,596 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
22
+ from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_STANDARD_MEAN,
25
+ IMAGENET_STANDARD_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ get_image_size,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ is_valid_image,
33
+ to_numpy_array,
34
+ valid_images,
35
+ validate_preprocess_arguments,
36
+ )
37
+ from ...utils import TensorType, is_vision_available, logging
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+ from PIL import Image
46
+
47
+
48
+ def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]:
49
+ """
50
+ Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
51
+
52
+ Args:
53
+ image (`np.ndarray`):
54
+ Image to resize.
55
+ size (`Dict[str, int]`):
56
+ Size of the output image containing the keys "shortest_edge" and "longest_edge".
57
+ input_data_format (`ChannelDimension` or `str`):
58
+ The channel dimension format of the input image.
59
+
60
+ Returns:
61
+ The output size of the image after resizing.
62
+ """
63
+ height, width = get_image_size(image, channel_dim=input_data_format)
64
+
65
+ min_len = size["shortest_edge"]
66
+ max_len = size["longest_edge"]
67
+ aspect_ratio = width / height
68
+
69
+ if width >= height and width > max_len:
70
+ width = max_len
71
+ height = int(width / aspect_ratio)
72
+ elif height > width and height > max_len:
73
+ height = max_len
74
+ width = int(height * aspect_ratio)
75
+ height = max(height, min_len)
76
+ width = max(width, min_len)
77
+ return height, width
78
+
79
+
80
+ def make_list_of_images(images: ImageInput) -> List[List[np.ndarray]]:
81
+ """
82
+ Convert a single image or a list of images to a list of numpy arrays.
83
+
84
+ Args:
85
+ images (`ImageInput`):
86
+ A single image or a list of images.
87
+
88
+ Returns:
89
+ A list of numpy arrays.
90
+ """
91
+ # If it's a single image, convert it to a list of lists
92
+ if is_valid_image(images):
93
+ images = [[images]]
94
+ # If it's a list of images, it's a single batch, so convert it to a list of lists
95
+ elif isinstance(images, (list, tuple)) and len(images) > 0 and is_valid_image(images[0]):
96
+ images = [images]
97
+ # If it's a list of batches, it's already in the right format
98
+ elif (
99
+ isinstance(images, (list, tuple))
100
+ and len(images) > 0
101
+ and isinstance(images[0], (list, tuple))
102
+ and is_valid_image(images[0][0])
103
+ ):
104
+ pass
105
+ else:
106
+ raise ValueError(
107
+ "Invalid input type. Must be a single image, a list of images, or a list of batches of images."
108
+ )
109
+ return images
110
+
111
+
112
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
113
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
114
+ """
115
+ Return the maximum value across all indices of an iterable of values.
116
+ """
117
+ return [max(values_i) for values_i in zip(*values)]
118
+
119
+
120
+ def get_max_height_width(
121
+ images_list: List[List[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]] = None
122
+ ) -> List[int]:
123
+ """
124
+ Get the maximum height and width across all images in a batch.
125
+ """
126
+ if input_data_format is None:
127
+ input_data_format = infer_channel_dimension_format(images_list[0][0])
128
+
129
+ image_sizes = []
130
+ for images in images_list:
131
+ for image in images:
132
+ image_sizes.append(get_image_size(image, channel_dim=input_data_format))
133
+
134
+ max_height, max_width = max_across_indices(image_sizes)
135
+ return (max_height, max_width)
136
+
137
+
138
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
139
+ def make_pixel_mask(
140
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
141
+ ) -> np.ndarray:
142
+ """
143
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
144
+
145
+ Args:
146
+ image (`np.ndarray`):
147
+ Image to make the pixel mask for.
148
+ output_size (`Tuple[int, int]`):
149
+ Output size of the mask.
150
+ """
151
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
152
+ mask = np.zeros(output_size, dtype=np.int64)
153
+ mask[:input_height, :input_width] = 1
154
+ return mask
155
+
156
+
157
+ # FIXME Amy: merge this function with the one in image_transforms.py
158
+ def convert_to_rgb(image: ImageInput) -> ImageInput:
159
+ """
160
+ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
161
+ as is.
162
+ Args:
163
+ image (Image):
164
+ The image to convert.
165
+ """
166
+ if not isinstance(image, PIL.Image.Image):
167
+ return image
168
+
169
+ # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
170
+ # for transparent images. The call to `alpha_composite` handles this case
171
+ if image.mode == "RGB":
172
+ return image
173
+
174
+ image_rgba = image.convert("RGBA")
175
+ background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
176
+ alpha_composite = Image.alpha_composite(background, image_rgba)
177
+ alpha_composite = alpha_composite.convert("RGB")
178
+ return alpha_composite
179
+
180
+
181
+ class Idefics2ImageProcessor(BaseImageProcessor):
182
+ r"""
183
+ Constructs a Idefics image processor.
184
+
185
+ Args:
186
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
187
+ Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA.
188
+ Only has an effect if the input image is in the PIL format.
189
+ do_resize (`bool`, *optional*, defaults to `True`):
190
+ Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the
191
+ shortest edge resized to keep the input aspect ratio, with a minimum size of `size["shortest_edge"]`.
192
+ size (`Dict`, *optional*):
193
+ Controls the size of the output image. This is a dictionary containing the keys "shortest_edge" and "longest_edge".
194
+ resample (`Resampling`, *optional*, defaults to `Resampling.BILINEAR`):
195
+ Resampling filter to use when resizing the image.
196
+ do_rescale (`bool`, *optional*, defaults to `True`):
197
+ Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1.
198
+ rescale_factor (`float`, *optional*, defaults to `1/255`):
199
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
200
+ do_normalize (`bool`, *optional*, defaults to `True`):
201
+ Whether to normalize the image. If set to `True`, the image is normalized to have a mean of `image_mean` and
202
+ a standard deviation of `image_std`.
203
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
204
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
205
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
207
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
208
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
209
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
211
+ do_pad (`bool`, *optional*, defaults to `True`):
212
+ Whether or not to pad the images to the largest height and width in the batch and number of images per
213
+ sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width).
214
+ do_image_splitting (`bool`, *optional*, defaults to `False`):
215
+ Whether to split the image into a sequence 4 equal sub-images concatenated with the original image. That
216
+ strategy was first introduced in https://arxiv.org/abs/2311.06607.
217
+ """
218
+
219
+ model_input_names = ["pixel_values"]
220
+
221
+ def __init__(
222
+ self,
223
+ do_convert_rgb: bool = True,
224
+ do_resize: bool = True,
225
+ size: Dict[str, int] = None,
226
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
227
+ do_rescale: bool = True,
228
+ rescale_factor: float = 1 / 255,
229
+ do_normalize: bool = True,
230
+ image_mean: Optional[Union[float, List[float]]] = None,
231
+ image_std: Optional[Union[float, List[float]]] = None,
232
+ do_pad: bool = True,
233
+ do_image_splitting: bool = False,
234
+ **kwargs,
235
+ ) -> None:
236
+ super().__init__(**kwargs)
237
+ self.do_convert_rgb = do_convert_rgb
238
+ self.do_resize = do_resize
239
+ self.size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980}
240
+ self.resample = resample
241
+ self.do_rescale = do_rescale
242
+ self.rescale_factor = rescale_factor
243
+ self.do_normalize = do_normalize
244
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
245
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
246
+ self.do_pad = do_pad
247
+ self.do_image_splitting = do_image_splitting
248
+
249
+ def resize(
250
+ self,
251
+ image: np.ndarray,
252
+ size: Dict[str, int],
253
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
254
+ data_format: Optional[Union[str, ChannelDimension]] = None,
255
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
256
+ **kwargs,
257
+ ) -> np.ndarray:
258
+ """
259
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
260
+ resized to keep the input aspect ratio.
261
+
262
+ Args:
263
+ image (`np.ndarray`):
264
+ Image to resize.
265
+ size (`Dict[str, int]`):
266
+ Size of the output image.
267
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
268
+ Resampling filter to use when resiizing the image.
269
+ data_format (`str` or `ChannelDimension`, *optional*):
270
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
271
+ input_data_format (`ChannelDimension` or `str`, *optional*):
272
+ The channel dimension format of the input image. If not provided, it will be inferred.
273
+ """
274
+ if "shortest_edge" in size and "longest_edge" in size:
275
+ size = get_resize_output_image_size(image, size, input_data_format)
276
+ elif "height" in size and "width" in size:
277
+ size = (size["height"], size["width"])
278
+ else:
279
+ raise ValueError(
280
+ "size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'."
281
+ )
282
+ return resize(
283
+ image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
284
+ )
285
+
286
+ # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image
287
+ def _pad_image(
288
+ self,
289
+ image: np.ndarray,
290
+ output_size: Tuple[int, int],
291
+ constant_values: Union[float, Iterable[float]] = 0,
292
+ data_format: Optional[ChannelDimension] = None,
293
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
294
+ ) -> np.ndarray:
295
+ """
296
+ Pad an image with zeros to the given size.
297
+ """
298
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
299
+ output_height, output_width = output_size
300
+
301
+ pad_bottom = output_height - input_height
302
+ pad_right = output_width - input_width
303
+ padding = ((0, pad_bottom), (0, pad_right))
304
+ padded_image = pad(
305
+ image,
306
+ padding,
307
+ mode=PaddingMode.CONSTANT,
308
+ constant_values=constant_values,
309
+ data_format=data_format,
310
+ input_data_format=input_data_format,
311
+ )
312
+ return padded_image
313
+
314
+ def pad(
315
+ self,
316
+ images: List[np.ndarray],
317
+ constant_values: Union[float, Iterable[float]] = 0,
318
+ return_pixel_mask: bool = True,
319
+ return_tensors: Optional[Union[str, TensorType]] = None,
320
+ data_format: Optional[ChannelDimension] = None,
321
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
322
+ ) -> BatchFeature:
323
+ """
324
+ For a list of lists of images, pads each image to the bottom and right with zeros, up to the largest height and width in the batch.
325
+ For each sample in the batch, pads the sample with empty images up to the maximum number of images per sample in the batch. Optionally returns a pixel mask.
326
+
327
+ Args:
328
+ images (`np.ndarray`):
329
+ List of list of images to pad. Pads to the largest height and width in the batch.
330
+ constant_values (`float` or `Iterable[float]`, *optional*):
331
+ The value to use for the padding if `mode` is `"constant"`.
332
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
333
+ Whether to return a pixel mask.
334
+ return_tensors (`str` or `TensorType`, *optional*):
335
+ The type of tensors to return. Can be one of:
336
+ - Unset: Return a list of `np.ndarray`.
337
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
338
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
339
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
340
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
341
+ data_format (`str` or `ChannelDimension`, *optional*):
342
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
343
+ input_data_format (`ChannelDimension` or `str`, *optional*):
344
+ The channel dimension format of the input image. If not provided, it will be inferred.
345
+ """
346
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
347
+
348
+ batch_size = len(images)
349
+ max_num_images = max(len(images_) for images_ in images)
350
+ input_data_format = (
351
+ infer_channel_dimension_format(images[0][0]) if input_data_format is None else input_data_format
352
+ )
353
+ data_format = input_data_format if data_format is None else data_format
354
+
355
+ def empty_image(size, input_data_format):
356
+ if input_data_format == ChannelDimension.FIRST:
357
+ return np.zeros((3, *size), dtype=np.uint8)
358
+ elif input_data_format == ChannelDimension.LAST:
359
+ return np.zeros((*size, 3), dtype=np.uint8)
360
+ raise ValueError("Invalid channel dimension format.")
361
+
362
+ padded_images_list = [
363
+ [empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)
364
+ ]
365
+ padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)]
366
+
367
+ for batch_idx in range(batch_size):
368
+ for sample_idx, image in enumerate(images[batch_idx]):
369
+ padded_images_list[batch_idx][sample_idx] = self._pad_image(
370
+ image,
371
+ pad_size,
372
+ constant_values=constant_values,
373
+ data_format=data_format,
374
+ input_data_format=input_data_format,
375
+ )
376
+ padded_masks[batch_idx][sample_idx] = make_pixel_mask(
377
+ image, output_size=pad_size, input_data_format=input_data_format
378
+ )
379
+
380
+ padded_masks = padded_masks if return_pixel_mask else None
381
+ return padded_images_list, padded_masks
382
+
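To make the double padding above concrete (every image padded to the batch-wide max height/width, every sample padded to the batch-wide max number of images), here is a minimal numpy-only sketch of the same bookkeeping; it is illustrative, not the method itself.

# Toy illustration of padding a ragged batch and building matching pixel masks (channels-last).
import numpy as np

batch = [[np.ones((4, 6, 3)), np.ones((5, 3, 3))], [np.ones((2, 7, 3))]]   # 2 samples, ragged
max_h = max(img.shape[0] for sample in batch for img in sample)            # 5
max_w = max(img.shape[1] for sample in batch for img in sample)            # 7
max_n = max(len(sample) for sample in batch)                               # 2

padded, masks = [], []
for sample in batch:
    imgs = [np.zeros((max_h, max_w, 3)) for _ in range(max_n)]
    msks = [np.zeros((max_h, max_w)) for _ in range(max_n)]
    for i, img in enumerate(sample):
        h, w = img.shape[:2]
        imgs[i][:h, :w] = img          # pad bottom/right with zeros
        msks[i][:h, :w] = 1            # 1 marks real pixels
    padded.append(imgs)
    masks.append(msks)

pixel_values = np.array(padded)         # shape (2, 2, 5, 7, 3)
pixel_attention_mask = np.array(masks)  # shape (2, 2, 5, 7)
print(pixel_values.shape, pixel_attention_mask.shape)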
383
+ def _crop(
384
+ self,
385
+ im: np.ndarray,
386
+ w1: int,
387
+ h1: int,
388
+ w2: int,
389
+ h2: int,
390
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
391
+ ) -> np.ndarray:
392
+ if input_data_format == ChannelDimension.FIRST:
393
+ return im[:, h1:h2, w1:w2]
394
+ elif input_data_format == ChannelDimension.LAST:
395
+ return im[h1:h2, w1:w2, :]
396
+
397
+ def split_image(
398
+ self,
399
+ image: np.ndarray,
400
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
401
+ ):
402
+ """
403
+ Split an image into 4 equal sub-images, and concatenate that sequence with the original image.
404
+ That means that a single image becomes a sequence of 5 images.
405
+ This is a "trick" to spend more compute on each image with no changes in the vision encoder.
406
+
407
+ Args:
408
+ image (`np.ndarray`):
409
+ Image to split.
410
+ input_data_format (`ChannelDimension` or `str`, *optional*):
411
+ The channel dimension format of the input image. If not provided, it will be inferred.
412
+ """
413
+ height, width = get_image_size(image, input_data_format)
414
+
415
+ mid_width = width // 2
416
+ mid_height = height // 2
417
+ return [
418
+ self._crop(image, 0, 0, mid_width, mid_height, input_data_format),
419
+ self._crop(image, mid_width, 0, width, mid_height, input_data_format),
420
+ self._crop(image, 0, mid_height, mid_width, height, input_data_format),
421
+ self._crop(image, mid_width, mid_height, width, height, input_data_format),
422
+ image,
423
+ ]
424
+
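A quick numpy check of the splitting scheme above on a toy channels-last array: the four crops plus the original give a sequence of five images.

# Minimal check of the 4-crops-plus-original idea on a toy channels-last array.
import numpy as np

image = np.arange(6 * 8 * 3).reshape(6, 8, 3)   # toy (height=6, width=8, channels=3) image
h, w = image.shape[:2]
mh, mw = h // 2, w // 2
crops = [
    image[:mh, :mw],   # top-left
    image[:mh, mw:],   # top-right
    image[mh:, :mw],   # bottom-left
    image[mh:, mw:],   # bottom-right
    image,             # the original is appended last
]
assert len(crops) == 5 and crops[0].shape == (3, 4, 3)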
425
+ def preprocess(
426
+ self,
427
+ images: ImageInput,
428
+ do_convert_rgb: Optional[bool] = None,
429
+ do_resize: Optional[bool] = None,
430
+ size: Optional[Dict[str, int]] = None,
431
+ resample: PILImageResampling = None,
432
+ do_rescale: Optional[bool] = None,
433
+ rescale_factor: Optional[float] = None,
434
+ do_normalize: Optional[bool] = None,
435
+ image_mean: Optional[Union[float, List[float]]] = None,
436
+ image_std: Optional[Union[float, List[float]]] = None,
437
+ do_pad: Optional[bool] = None,
438
+ do_image_splitting: Optional[bool] = None,
439
+ return_tensors: Optional[Union[str, TensorType]] = None,
440
+ input_data_format: Optional[ChannelDimension] = None,
441
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
442
+ ):
443
+ """
444
+ Preprocess a batch of images.
445
+
446
+ Args:
447
+ images (`ImageInput`):
448
+ A list of images to preprocess.
449
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
450
+ Whether to convert the image to RGB.
451
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
452
+ Whether to resize the image.
453
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
454
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
455
+ the longest edge resized to keep the input aspect ratio.
456
+ resample (`int`, *optional*, defaults to `self.resample`):
457
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
458
+ has an effect if `do_resize` is set to `True`.
459
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
460
+ Whether to rescale the image.
461
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
462
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
463
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
464
+ Whether to normalize the image.
465
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
466
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
467
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
468
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
469
+ `True`.
470
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
471
+ Whether or not to pad the images to the largest height and width in the batch.
472
+ do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`):
473
+ Whether to split the image into a sequence of 4 equal sub-images concatenated with the original image. That
474
+ strategy was first introduced in https://arxiv.org/abs/2311.06607.
475
+ return_tensors (`str` or `TensorType`, *optional*):
476
+ The type of tensors to return. Can be one of:
477
+ - Unset: Return a list of `np.ndarray`.
478
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
479
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
480
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
481
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
482
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
483
+ The channel dimension format for the output image. Can be one of:
484
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
485
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
486
+ - Unset: Use the channel dimension format of the input image.
487
+ input_data_format (`ChannelDimension` or `str`, *optional*):
488
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
489
+ from the input image. Can be one of:
490
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
491
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
492
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
493
+ """
494
+ do_resize = do_resize if do_resize is not None else self.do_resize
495
+ size = size if size is not None else self.size
496
+ resample = resample if resample is not None else self.resample
497
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
498
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
499
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
500
+ image_mean = image_mean if image_mean is not None else self.image_mean
501
+ image_std = image_std if image_std is not None else self.image_std
502
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
503
+ do_pad = do_pad if do_pad is not None else self.do_pad
504
+ do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting
505
+
506
+ images_list = make_list_of_images(images)
507
+
508
+ if not valid_images(images_list[0]):
509
+ raise ValueError(
510
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
511
+ "torch.Tensor, tf.Tensor or jax.ndarray."
512
+ )
513
+
514
+ validate_preprocess_arguments(
515
+ do_rescale=do_rescale,
516
+ rescale_factor=rescale_factor,
517
+ do_normalize=do_normalize,
518
+ image_mean=image_mean,
519
+ image_std=image_std,
520
+ do_resize=do_resize,
521
+ size=size,
522
+ resample=resample,
523
+ )
524
+
525
+ if do_convert_rgb:
526
+ images_list = [[convert_to_rgb(image) for image in images] for images in images_list]
527
+
528
+ # All transformations expect numpy arrays.
529
+ images_list = [[to_numpy_array(image) for image in images] for images in images_list]
530
+
531
+ if is_scaled_image(images_list[0][0]) and do_rescale:
532
+ logger.warning_once(
533
+ "It looks like you are trying to rescale already rescaled images. If the input"
534
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
535
+ )
536
+
537
+ if input_data_format is None:
538
+ # We assume that all images have the same channel dimension format.
539
+ input_data_format = infer_channel_dimension_format(images_list[0][0])
540
+
541
+ if do_image_splitting:
542
+ new_images_list = []
543
+ for images in images_list:
544
+ new_images = []
545
+ for image in images:
546
+ new_images.extend(self.split_image(image, input_data_format))
547
+ new_images_list.append(new_images)
548
+ images_list = new_images_list
549
+
550
+ if do_resize:
551
+ images_list = [
552
+ [
553
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
554
+ for image in images
555
+ ]
556
+ for images in images_list
557
+ ]
558
+
559
+ if do_rescale:
560
+ images_list = [
561
+ [
562
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
563
+ for image in images
564
+ ]
565
+ for images in images_list
566
+ ]
567
+
568
+ if do_normalize:
569
+ images_list = [
570
+ [
571
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
572
+ for image in images
573
+ ]
574
+ for images in images_list
575
+ ]
576
+
577
+ pixel_attention_mask = None
578
+ if do_pad:
579
+ images_list, pixel_attention_mask = self.pad(
580
+ images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format
581
+ )
582
+
583
+ if data_format is not None:
584
+ images_list = [
585
+ [
586
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
587
+ for image in images
588
+ ]
589
+ for images in images_list
590
+ ]
591
+
592
+ data = {"pixel_values": np.array(images_list) if do_pad else images_list} # Faster tensor conversion
593
+ if pixel_attention_mask is not None:
594
+ data["pixel_attention_mask"] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask
595
+
596
+ return BatchFeature(data=data, tensor_type=return_tensors)
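A hypothetical end-to-end call of this preprocessor (the `Idefics2ImageProcessor` class name and the exact output spatial size are assumptions, not shown in this diff): one sample with one image becomes five images once `do_image_splitting` is enabled, padded and masked as above.

# Hypothetical usage sketch of the preprocessor defined above.
import numpy as np
from PIL import Image
from transformers import Idefics2ImageProcessor  # assumed entry point for this file

processor = Idefics2ImageProcessor(do_image_splitting=True)
images = [[Image.fromarray(np.zeros((300, 500, 3), dtype=np.uint8))]]  # 1 sample, 1 image
batch = processor.preprocess(images, return_tensors="pt")
print(batch["pixel_values"].shape)          # (1, 5, 3, H, W) after splitting
print(batch["pixel_attention_mask"].shape)  # (1, 5, H, W)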
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/modeling_idefics2.py ADDED
@@ -0,0 +1,1956 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Idefics2 model."""
16
+
17
+ import inspect
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss
27
+
28
+ from ... import PreTrainedModel
29
+ from ...activations import ACT2FN
30
+ from ...cache_utils import Cache, DynamicCache
31
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
32
+ from ...modeling_outputs import BaseModelOutput, ModelOutput
33
+ from ...utils import (
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ is_flash_attn_2_available,
37
+ is_flash_attn_greater_or_equal_2_10,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from ..auto import AutoModel
42
+ from .configuration_idefics2 import Idefics2Config, Idefics2VisionConfig
43
+
44
+
45
+ if is_flash_attn_2_available():
46
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
47
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
48
+
49
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CONFIG_FOR_DOC = "Idefics2Config"
55
+
56
+ IDEFICS2_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "HuggingFaceM4/idefics2-8b",
58
+ # See all IDEFICS2 models at https://huggingface.co/models?filter=idefics2
59
+ ]
60
+
61
+
62
+ @dataclass
63
+ class Idefics2BaseModelOutputWithPast(ModelOutput):
64
+ """
65
+ Base class for Idefics2 model's outputs that may also contain a past key/values (to speed up sequential decoding).
66
+ Args:
67
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
68
+ Sequence of hidden-states at the output of the last layer of the model.
69
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
70
+ hidden_size)` is output.
71
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
72
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
73
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
74
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
75
+ encoder_sequence_length, embed_size_per_head)`.
76
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
77
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
78
+ input) to speed up sequential decoding.
79
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
80
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
81
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
82
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
83
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
84
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
85
+ sequence_length)`.
86
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
87
+ heads.
88
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
89
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
90
+ sequence_length, hidden_size)`.
91
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
92
+ """
93
+
94
+ last_hidden_state: torch.FloatTensor = None
95
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
96
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
97
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
98
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
99
+
100
+
101
+ @dataclass
102
+ # Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Idefics2
103
+ class Idefics2CausalLMOutputWithPast(ModelOutput):
104
+ """
105
+ Base class for Idefics2 causal language model (or autoregressive) outputs.
106
+ Args:
107
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
108
+ Language modeling loss (for next-token prediction).
109
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
110
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
111
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
112
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
113
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
114
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
115
+ `past_key_values` input) to speed up sequential decoding.
116
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
117
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
118
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
119
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
120
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
121
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
122
+ sequence_length)`.
123
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
124
+ heads.
125
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
126
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
127
+ sequence_length, hidden_size)`.
128
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ logits: torch.FloatTensor = None
133
+ past_key_values: Optional[List[torch.FloatTensor]] = None
134
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
135
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
136
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
137
+
138
+
139
+ class Idefics2VisionEmbeddings(nn.Module):
140
+ """
141
+ This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
142
+ resolution.
143
+
144
+ The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304)
145
+ which allows treating images in their native aspect ratio and without the need to resize them to the same
146
+ fixed size. In particular, we start from the original pre-trained SigLIP model
147
+ (which was trained on fixed-size square images) and adapt it by training on images of variable resolution.
148
+ """
149
+
150
+ def __init__(self, config: Idefics2VisionConfig):
151
+ super().__init__()
152
+ self.embed_dim = config.hidden_size
153
+ self.image_size = config.image_size
154
+ self.patch_size = config.patch_size
155
+
156
+ self.patch_embedding = nn.Conv2d(
157
+ in_channels=config.num_channels,
158
+ out_channels=self.embed_dim,
159
+ kernel_size=self.patch_size,
160
+ stride=self.patch_size,
161
+ padding="valid",
162
+ )
163
+
164
+ self.num_patches_per_side = self.image_size // self.patch_size
165
+ self.num_patches = self.num_patches_per_side**2
166
+ self.num_positions = self.num_patches
167
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
168
+
169
+ def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
170
+ batch_size, _, max_im_h, max_im_w = pixel_values.shape
171
+
172
+ patch_embeds = self.patch_embedding(pixel_values)
173
+ embeddings = patch_embeds.flatten(2).transpose(1, 2)
174
+
175
+ max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size
176
+ boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side)
177
+ position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0)
178
+
179
+ for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
180
+ nb_patches_h = p_attn_mask[:, 0].sum()
181
+ nb_patches_w = p_attn_mask[0].sum()
182
+
183
+ fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
184
+ fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
185
+
186
+ bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
187
+ bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
188
+
189
+ pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
190
+ position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
191
+
192
+ position_ids = position_ids.to(self.position_embedding.weight.device)
193
+ embeddings = embeddings + self.position_embedding(position_ids)
194
+ return embeddings
195
+
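The position-id trick above can be hard to parse; the following standalone snippet reproduces the fractional-coordinate bucketing for one image whose patch grid is smaller than the maximum (the sizes here are illustrative assumptions).

# Sketch of the fractional-coordinate bucketing: map a smaller (nb_patches_h x nb_patches_w)
# grid onto the full num_patches_per_side x num_patches_per_side position table.
import torch

num_patches_per_side = 27                      # e.g. image_size 378 / patch_size 14
boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)

nb_patches_h, nb_patches_w = 10, 20            # a smaller, non-square patch grid
frac_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
frac_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
bucket_h = torch.bucketize(frac_h, boundaries, right=True)
bucket_w = torch.bucketize(frac_w, boundaries, right=True)
pos_ids = (bucket_h[:, None] * num_patches_per_side + bucket_w).flatten()
print(pos_ids.shape, int(pos_ids.max()))       # torch.Size([200]), always < 27 * 27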
196
+
197
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipAttention with Siglip->Idefics2Vision
198
+ class Idefics2VisionAttention(nn.Module):
199
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
200
+
201
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention.__init__
202
+ def __init__(self, config):
203
+ super().__init__()
204
+ self.config = config
205
+ self.embed_dim = config.hidden_size
206
+ self.num_heads = config.num_attention_heads
207
+ self.head_dim = self.embed_dim // self.num_heads
208
+ if self.head_dim * self.num_heads != self.embed_dim:
209
+ raise ValueError(
210
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
211
+ f" {self.num_heads})."
212
+ )
213
+ self.scale = self.head_dim**-0.5
214
+ self.dropout = config.attention_dropout
215
+
216
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
217
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
218
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
219
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
220
+
221
+ # Ignore copy
222
+ self.is_causal = False
223
+
224
+ def forward(
225
+ self,
226
+ hidden_states: torch.Tensor,
227
+ attention_mask: Optional[torch.Tensor] = None,
228
+ output_attentions: Optional[bool] = False,
229
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
230
+ """Input shape: Batch x Time x Channel"""
231
+
232
+ batch_size, q_len, _ = hidden_states.size()
233
+
234
+ query_states = self.q_proj(hidden_states)
235
+ key_states = self.k_proj(hidden_states)
236
+ value_states = self.v_proj(hidden_states)
237
+
238
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
239
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
240
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
241
+
242
+ k_v_seq_len = key_states.shape[-2]
243
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
244
+
245
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
246
+ raise ValueError(
247
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
248
+ f" {attn_weights.size()}"
249
+ )
250
+
251
+ if attention_mask is not None:
252
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
253
+ raise ValueError(
254
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
255
+ )
256
+ attn_weights = attn_weights + attention_mask
257
+
258
+ # upcast attention to fp32
259
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
260
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
261
+ attn_output = torch.matmul(attn_weights, value_states)
262
+
263
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
264
+ raise ValueError(
265
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
266
+ f" {attn_output.size()}"
267
+ )
268
+
269
+ attn_output = attn_output.transpose(1, 2).contiguous()
270
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
271
+
272
+ attn_output = self.out_proj(attn_output)
273
+
274
+ return attn_output, attn_weights
275
+
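For reference, the eager attention math above (scale, softmax, weighted sum) matches PyTorch's fused kernel when there is no mask and no dropout; this is only a sanity-check sketch and assumes PyTorch 2.x for `scaled_dot_product_attention`.

# Cross-check of manual scaled dot-product attention against the fused kernel.
import torch
import torch.nn.functional as F

b, h, q_len, d = 2, 4, 9, 16
q, k, v = (torch.randn(b, h, q_len, d) for _ in range(3))
weights = torch.softmax(torch.matmul(q, k.transpose(2, 3)) * d**-0.5, dim=-1)
manual = torch.matmul(weights, v)
fused = F.scaled_dot_product_attention(q, k, v)
assert torch.allclose(manual, fused, atol=1e-5)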
276
+
277
+ class Idefics2VisionFlashAttention2(Idefics2VisionAttention):
278
+ """
279
+ Idefics2Vision flash attention module. This module inherits from `Idefics2VisionAttention` as the weights of the module stay
280
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
281
+ flash attention and deal with padding tokens in case the input contains any of them.
282
+ """
283
+
284
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
285
+ def __init__(self, *args, **kwargs):
286
+ super().__init__(*args, **kwargs)
287
+
288
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
289
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
290
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
291
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
292
+
293
+ def forward(
294
+ self,
295
+ hidden_states: torch.Tensor,
296
+ attention_mask: Optional[torch.LongTensor] = None,
297
+ position_ids: Optional[torch.LongTensor] = None,
298
+ past_key_value: Optional[Cache] = None,
299
+ output_attentions: bool = False,
300
+ use_cache: bool = False,
301
+ **kwargs,
302
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
303
+ output_attentions = False
304
+
305
+ bsz, q_len, _ = hidden_states.size()
306
+
307
+ query_states = self.q_proj(hidden_states)
308
+ key_states = self.k_proj(hidden_states)
309
+ value_states = self.v_proj(hidden_states)
310
+
311
+ # Flash attention requires the input to have the shape
312
+ # batch_size x seq_length x num_heads x head_dim
313
+ # therefore we just need to keep the original shape
314
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
315
+ key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
316
+ value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
317
+
318
+ kv_seq_len = key_states.shape[-2]
319
+ if past_key_value is not None:
320
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
321
+
322
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
323
+ # to be able to avoid many of these transpose/reshape/view.
324
+ query_states = query_states.transpose(1, 2)
325
+ key_states = key_states.transpose(1, 2)
326
+ value_states = value_states.transpose(1, 2)
327
+
328
+ dropout_rate = self.dropout if self.training else 0.0
329
+
330
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
331
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
332
+ # cast them back in the correct dtype just to be sure everything works as expected.
333
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
334
+ # in fp32. (Idefics2VisionRMSNorm handles it correctly)
335
+
336
+ input_dtype = query_states.dtype
337
+ if input_dtype == torch.float32:
338
+ if torch.is_autocast_enabled():
339
+ target_dtype = torch.get_autocast_gpu_dtype()
340
+ # Handle the case where the model is quantized
341
+ elif hasattr(self.config, "_pre_quantization_dtype"):
342
+ target_dtype = self.config._pre_quantization_dtype
343
+ else:
344
+ target_dtype = self.q_proj.weight.dtype
345
+
346
+ logger.warning_once(
347
+ f"The input hidden states seem to have been silently cast to float32; this might be related to"
348
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
349
+ f" {target_dtype}."
350
+ )
351
+
352
+ query_states = query_states.to(target_dtype)
353
+ key_states = key_states.to(target_dtype)
354
+ value_states = value_states.to(target_dtype)
355
+
356
+ attn_output = self._flash_attention_forward(
357
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
358
+ )
359
+
360
+ attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous()
361
+ attn_output = self.out_proj(attn_output)
362
+
363
+ if not output_attentions:
364
+ attn_weights = None
365
+
366
+ return attn_output, attn_weights
367
+
368
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
369
+ def _flash_attention_forward(
370
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
371
+ ):
372
+ """
373
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
374
+ first unpads the input, then computes the attention scores and pads the final attention scores.
375
+
376
+ Args:
377
+ query_states (`torch.Tensor`):
378
+ Input query states to be passed to Flash Attention API
379
+ key_states (`torch.Tensor`):
380
+ Input key states to be passed to Flash Attention API
381
+ value_states (`torch.Tensor`):
382
+ Input value states to be passed to Flash Attention API
383
+ attention_mask (`torch.Tensor`):
384
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
385
+ position of padding tokens and 1 for the position of non-padding tokens.
386
+ dropout (`float`):
387
+ Attention dropout
388
+ softmax_scale (`float`, *optional*):
389
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
390
+ """
391
+ if not self._flash_attn_uses_top_left_mask:
392
+ causal = self.is_causal
393
+ else:
394
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
395
+ causal = self.is_causal and query_length != 1
396
+
397
+ # Contains at least one padding token in the sequence
398
+ if attention_mask is not None:
399
+ batch_size = query_states.shape[0]
400
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
401
+ query_states, key_states, value_states, attention_mask, query_length
402
+ )
403
+
404
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
405
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
406
+
407
+ attn_output_unpad = flash_attn_varlen_func(
408
+ query_states,
409
+ key_states,
410
+ value_states,
411
+ cu_seqlens_q=cu_seqlens_q,
412
+ cu_seqlens_k=cu_seqlens_k,
413
+ max_seqlen_q=max_seqlen_in_batch_q,
414
+ max_seqlen_k=max_seqlen_in_batch_k,
415
+ dropout_p=dropout,
416
+ softmax_scale=softmax_scale,
417
+ causal=causal,
418
+ )
419
+
420
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
421
+ else:
422
+ attn_output = flash_attn_func(
423
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
424
+ )
425
+
426
+ return attn_output
427
+
428
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
429
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
430
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
431
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
432
+
433
+ key_layer = index_first_axis(
434
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
435
+ )
436
+ value_layer = index_first_axis(
437
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
438
+ )
439
+ if query_length == kv_seq_len:
440
+ query_layer = index_first_axis(
441
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
442
+ )
443
+ cu_seqlens_q = cu_seqlens_k
444
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
445
+ indices_q = indices_k
446
+ elif query_length == 1:
447
+ max_seqlen_in_batch_q = 1
448
+ cu_seqlens_q = torch.arange(
449
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
450
+ ) # There is a memcpy here, that is very bad.
451
+ indices_q = cu_seqlens_q[:-1]
452
+ query_layer = query_layer.squeeze(1)
453
+ else:
454
+ # The -q_len: slice assumes left padding.
455
+ attention_mask = attention_mask[:, -query_length:]
456
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
457
+
458
+ return (
459
+ query_layer,
460
+ key_layer,
461
+ value_layer,
462
+ indices_q,
463
+ (cu_seqlens_q, cu_seqlens_k),
464
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
465
+ )
466
+
467
+
468
+ IDEFICS_VISION_ATTENTION_CLASSES = {
469
+ "eager": Idefics2VisionAttention,
470
+ "flash_attention_2": Idefics2VisionFlashAttention2,
471
+ }
472
+
473
+
474
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMLP with Siglip->Idefics2Vision
475
+ class Idefics2VisionMLP(nn.Module):
476
+ def __init__(self, config):
477
+ super().__init__()
478
+ self.config = config
479
+ self.activation_fn = ACT2FN[config.hidden_act]
480
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
481
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
482
+
483
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
484
+ hidden_states = self.fc1(hidden_states)
485
+ hidden_states = self.activation_fn(hidden_states)
486
+ hidden_states = self.fc2(hidden_states)
487
+ return hidden_states
488
+
489
+
490
+ class Idefics2MLP(nn.Module):
491
+ def __init__(
492
+ self,
493
+ hidden_size: int,
494
+ intermediate_size: int,
495
+ output_size: int,
496
+ hidden_act: str,
497
+ ):
498
+ super().__init__()
499
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
500
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
501
+ self.down_proj = nn.Linear(intermediate_size, output_size, bias=False)
502
+ self.act_fn = ACT2FN[hidden_act]
503
+
504
+ def forward(self, x):
505
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
506
+
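`Idefics2MLP` is a gated (SwiGLU-style) block: the activated `gate_proj` branch multiplies the `up_proj` branch before `down_proj` maps it to `output_size`. A quick shape check, assuming the module above is importable:

import torch

mlp = Idefics2MLP(hidden_size=16, intermediate_size=64, output_size=32, hidden_act="silu")
x = torch.randn(2, 5, 16)
print(mlp(x).shape)  # torch.Size([2, 5, 32])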
507
+
508
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipMultiheadAttentionPoolingHead with Siglip->Idefics2
509
+ class Idefics2MultiheadAttentionPoolingHead(nn.Module):
510
+ """Multihead Attention Pooling."""
511
+
512
+ def __init__(self, config: Idefics2VisionConfig):
513
+ super().__init__()
514
+
515
+ self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
516
+ self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
517
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
518
+ # Ignore copy
519
+ self.mlp = Idefics2MLP(
520
+ hidden_size=config.hidden_size,
521
+ intermediate_size=config.intermediate_size,
522
+ hidden_act=config.hidden_act,
523
+ output_size=config.hidden_size,
524
+ )
525
+
526
+ def forward(self, hidden_state):
527
+ batch_size = hidden_state.shape[0]
528
+ probe = self.probe.repeat(batch_size, 1, 1)
529
+
530
+ hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
531
+
532
+ residual = hidden_state
533
+ hidden_state = self.layernorm(hidden_state)
534
+ hidden_state = residual + self.mlp(hidden_state)
535
+
536
+ return hidden_state[:, 0]
537
+
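The pooling head above uses a single learned "probe" query that attends over the whole token sequence; the standalone sketch below shows the same idea with plain `torch.nn.MultiheadAttention` (sizes are arbitrary).

# Probe-based attention pooling: one learned query attends over the sequence,
# and the pooled output is that single position.
import torch
from torch import nn

hidden, heads, seq_len, bsz = 32, 4, 10, 2
probe = nn.Parameter(torch.randn(1, 1, hidden))
attention = nn.MultiheadAttention(hidden, heads, batch_first=True)
tokens = torch.randn(bsz, seq_len, hidden)
pooled = attention(probe.repeat(bsz, 1, 1), tokens, tokens)[0][:, 0]
print(pooled.shape)  # torch.Size([2, 32])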
538
+
539
+ class Idefics2EncoderLayer(nn.Module):
540
+ def __init__(self, config: Idefics2Config):
541
+ super().__init__()
542
+ self.embed_dim = config.hidden_size
543
+ self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config)
544
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
545
+ self.mlp = Idefics2VisionMLP(config)
546
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
547
+
548
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoderLayer.forward
549
+ def forward(
550
+ self,
551
+ hidden_states: torch.Tensor,
552
+ attention_mask: torch.Tensor,
553
+ output_attentions: Optional[bool] = False,
554
+ ) -> Tuple[torch.FloatTensor]:
555
+ """
556
+ Args:
557
+ hidden_states (`torch.FloatTensor`):
558
+ Input to the layer of shape `(batch, seq_len, embed_dim)`.
559
+ attention_mask (`torch.FloatTensor`):
560
+ Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
561
+ output_attentions (`bool`, *optional*, defaults to `False`):
562
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
563
+ returned tensors for more detail.
564
+ """
565
+ residual = hidden_states
566
+
567
+ hidden_states = self.layer_norm1(hidden_states)
568
+ hidden_states, attn_weights = self.self_attn(
569
+ hidden_states=hidden_states,
570
+ attention_mask=attention_mask,
571
+ output_attentions=output_attentions,
572
+ )
573
+ hidden_states = residual + hidden_states
574
+
575
+ residual = hidden_states
576
+ hidden_states = self.layer_norm2(hidden_states)
577
+ hidden_states = self.mlp(hidden_states)
578
+ hidden_states = residual + hidden_states
579
+
580
+ outputs = (hidden_states,)
581
+
582
+ if output_attentions:
583
+ outputs += (attn_weights,)
584
+
585
+ return outputs
586
+
587
+
588
+ # Copied from transformers.models.siglip.modeling_siglip.SiglipEncoder with Siglip->Idefics2
589
+ class Idefics2Encoder(nn.Module):
590
+ """
591
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
592
+ [`Idefics2EncoderLayer`].
593
+
594
+ Args:
595
+ config: Idefics2Config
596
+ """
597
+
598
+ def __init__(self, config: Idefics2Config):
599
+ super().__init__()
600
+ self.config = config
601
+ self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
602
+ self.gradient_checkpointing = False
603
+
604
+ # Ignore copy
605
+ def forward(
606
+ self,
607
+ inputs_embeds,
608
+ attention_mask: Optional[torch.Tensor] = None,
609
+ output_attentions: Optional[bool] = None,
610
+ output_hidden_states: Optional[bool] = None,
611
+ return_dict: Optional[bool] = None,
612
+ ) -> Union[Tuple, BaseModelOutput]:
613
+ r"""
614
+ Args:
615
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
616
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
617
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
618
+ than the model's internal embedding lookup matrix.
619
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
620
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
621
+
622
+ - 1 for tokens that are **not masked**,
623
+ - 0 for tokens that are **masked**.
624
+
625
+ [What are attention masks?](../glossary#attention-mask)
626
+ output_attentions (`bool`, *optional*):
627
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
628
+ returned tensors for more detail.
629
+ output_hidden_states (`bool`, *optional*):
630
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
631
+ for more detail.
632
+ return_dict (`bool`, *optional*):
633
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
634
+ """
635
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
636
+ output_hidden_states = (
637
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
638
+ )
639
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
640
+
641
+ encoder_states = () if output_hidden_states else None
642
+ all_attentions = () if output_attentions else None
643
+
644
+ hidden_states = inputs_embeds
645
+ for encoder_layer in self.layers:
646
+ if output_hidden_states:
647
+ encoder_states = encoder_states + (hidden_states,)
648
+ if self.gradient_checkpointing and self.training:
649
+ layer_outputs = self._gradient_checkpointing_func(
650
+ encoder_layer.__call__,
651
+ hidden_states,
652
+ attention_mask,
653
+ output_attentions,
654
+ )
655
+ else:
656
+ layer_outputs = encoder_layer(
657
+ hidden_states,
658
+ attention_mask,
659
+ output_attentions=output_attentions,
660
+ )
661
+
662
+ hidden_states = layer_outputs[0]
663
+
664
+ if output_attentions:
665
+ all_attentions = all_attentions + (layer_outputs[1],)
666
+
667
+ if output_hidden_states:
668
+ encoder_states = encoder_states + (hidden_states,)
669
+
670
+ if not return_dict:
671
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
672
+ return BaseModelOutput(
673
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
674
+ )
675
+
676
+
677
+ class Idefics2VisionTransformer(nn.Module):
678
+ def __init__(self, config: Idefics2VisionConfig):
679
+ super().__init__()
680
+ embed_dim = config.hidden_size
681
+
682
+ self.config = config
683
+ self.embeddings = Idefics2VisionEmbeddings(config)
684
+ self.encoder = Idefics2Encoder(config)
685
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
686
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
687
+
688
+ def get_input_embeddings(self):
689
+ return self.embeddings
690
+
691
+ def set_input_embeddings(self, value):
692
+ self.embeddings = value
693
+
694
+ def forward(
695
+ self,
696
+ pixel_values,
697
+ patch_attention_mask: Optional[torch.BoolTensor] = None,
698
+ output_attentions: Optional[bool] = None,
699
+ output_hidden_states: Optional[bool] = None,
700
+ return_dict: Optional[bool] = None,
701
+ ) -> Union[Tuple, BaseModelOutput]:
702
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
703
+ output_hidden_states = (
704
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
705
+ )
706
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
707
+
708
+ batch_size = pixel_values.size(0)
709
+ if patch_attention_mask is None:
710
+ patch_size = self.config.patch_size
711
+ patch_attention_mask = torch.ones(
712
+ (
713
+ batch_size,
714
+ pixel_values.size(2) // patch_size,
715
+ pixel_values.size(3) // patch_size,
716
+ )
717
+ )
718
+ patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
719
+
720
+ hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
721
+
722
+ patch_attention_mask = patch_attention_mask.view(batch_size, -1)
723
+ # The call to `_upad_input` in `_flash_attention_forward` is expensive
724
+ # So when the `patch_attention_mask` is full of 1s (i.e. attending to the whole sequence),
725
+ # we avoid passing the attention_mask, which is equivalent to attending to the full sequence
726
+ if not torch.any(~patch_attention_mask):
727
+ patch_attention_mask = None
728
+ elif not self._use_flash_attention_2:
729
+ patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
730
+
731
+ encoder_outputs = self.encoder(
732
+ inputs_embeds=hidden_states,
733
+ attention_mask=patch_attention_mask,
734
+ output_attentions=output_attentions,
735
+ output_hidden_states=output_hidden_states,
736
+ return_dict=return_dict,
737
+ )
738
+
739
+ last_hidden_state = encoder_outputs[0]
740
+ last_hidden_state = self.post_layernorm(last_hidden_state)
741
+
742
+ if not return_dict:
743
+ return (last_hidden_state,) + encoder_outputs[1:]
744
+
745
+ return BaseModelOutput(
746
+ last_hidden_state=last_hidden_state,
747
+ hidden_states=encoder_outputs.hidden_states,
748
+ attentions=encoder_outputs.attentions,
749
+ )
750
+
751
+
752
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
753
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
754
+ """
755
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
756
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
757
+ """
758
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
759
+ if n_rep == 1:
760
+ return hidden_states
761
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
762
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
763
+
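A small sanity check that `repeat_kv` really is the head-wise repetition described in its docstring (assuming the function above is in scope):

import torch

kv = torch.randn(2, 4, 7, 16)  # (batch, num_key_value_heads, seq_len, head_dim)
assert torch.equal(repeat_kv(kv, 3), torch.repeat_interleave(kv, repeats=3, dim=1))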
764
+
765
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
766
+ def _get_unpad_data(attention_mask):
767
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
768
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
769
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
770
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
771
+ return (
772
+ indices,
773
+ cu_seqlens,
774
+ max_seqlen_in_batch,
775
+ )
776
+
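A toy run of the unpadding bookkeeping, with two sequences of true lengths 3 and 1, shows what `indices`, `cu_seqlens` and the max length look like:

# Two padded sequences of true lengths 3 and 1 -> flat indices of real tokens,
# cumulative sequence lengths, and the max length in the batch.
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0], [1, 0, 0, 0]])
seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)                      # tensor([3, 1])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([0, 1, 2, 4])
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))  # tensor([0, 3, 4])
print(indices.tolist(), cu_seqlens.tolist(), int(seqlens.max()))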
777
+
778
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Idefics2
779
+ class Idefics2RMSNorm(nn.Module):
780
+ def __init__(self, hidden_size, eps=1e-6):
781
+ """
782
+ Idefics2RMSNorm is equivalent to T5LayerNorm
783
+ """
784
+ super().__init__()
785
+ self.weight = nn.Parameter(torch.ones(hidden_size))
786
+ self.variance_epsilon = eps
787
+
788
+ def forward(self, hidden_states):
789
+ input_dtype = hidden_states.dtype
790
+ hidden_states = hidden_states.to(torch.float32)
791
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
792
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
793
+ return self.weight * hidden_states.to(input_dtype)
794
+
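With freshly initialized weights (all ones), the module above should match the textbook RMSNorm formula x * rsqrt(mean(x^2) + eps); a quick numeric check, assuming the class above is in scope:

import torch

norm = Idefics2RMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 3, 8)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), expected, atol=1e-6)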
795
+
796
+ class Idefics2PerceiverAttention(nn.Module):
797
+ def __init__(self, config, layer_idx: Optional[int] = None) -> None:
798
+ """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
799
+ super().__init__()
800
+
801
+ self.layer_idx = None
802
+ self.hidden_size = config.text_config.hidden_size
803
+ self.num_heads = config.perceiver_config.resampler_n_heads
804
+ self.head_dim = config.perceiver_config.resampler_head_dim
805
+ self.num_key_value_heads = config.perceiver_config.num_key_value_heads
806
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
807
+ self.attention_dropout = config.perceiver_config.attention_dropout
808
+
809
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
810
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
811
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
812
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
813
+
814
+ self.is_causal = False
815
+
816
+ def forward(
817
+ self,
818
+ latents: torch.Tensor,
819
+ context: torch.Tensor,
820
+ attention_mask: Optional[torch.Tensor] = None,
821
+ position_ids: Optional[torch.LongTensor] = None,
822
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
823
+ output_attentions: bool = False,
824
+ use_cache: bool = False,
825
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
826
+ """
827
+ Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
828
+
829
+ Args:
830
+ latents (`torch.Tensor`): Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to compress to.
831
+ context (`torch.Tensor`): Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample.
832
+ attention_mask (`torch.Tensor`, *optional*): Tensor of shape [bsz, 1, seq, n_latents] representing attention mask.
833
+ position_ids (`torch.LongTensor`, *optional*): Tensor of shape [bsz, seq] representing position indices of each input token.
834
+ past_key_value (`Tuple[torch.Tensor]`, *optional*): Tuple of tensors containing cached key and value states.
835
+ output_attentions (`bool`, *optional*, defaults to `False`): Whether to return attention weights.
836
+ use_cache (`bool`, *optional*, defaults to `False`): Whether to use past_key_value for caching.
837
+ """
838
+ bsz, q_len, _ = latents.size()
839
+ kv_seq_len = q_len + context.size()[1]
840
+
841
+ hidden_states = torch.concat([context, latents], dim=-2)
842
+
843
+ query_states = self.q_proj(latents)
844
+ key_states = self.k_proj(hidden_states)
845
+ value_states = self.v_proj(hidden_states)
846
+
847
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
848
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
849
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
850
+
851
+ past_key_value = getattr(self, "past_key_value", past_key_value)
852
+
853
+ if past_key_value is not None:
854
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
855
+
856
+ # repeat k/v heads if n_kv_heads < n_heads
857
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
858
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
859
+
860
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
861
+
862
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
863
+ raise ValueError(
864
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
865
+ f" {attn_weights.size()}"
866
+ )
867
+
868
+ if attention_mask is not None:
869
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
870
+ raise ValueError(
871
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
872
+ )
873
+
874
+ attn_weights = attn_weights + attention_mask
875
+
876
+ # upcast attention to fp32
877
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
878
+ attn_output = torch.matmul(attn_weights, value_states)
879
+
880
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
881
+ raise ValueError(
882
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
883
+ f" {attn_output.size()}"
884
+ )
885
+
886
+ attn_output = attn_output.transpose(1, 2).contiguous()
887
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim)
888
+
889
+ attn_output = self.o_proj(attn_output)
890
+
891
+ if not output_attentions:
892
+ attn_weights = None
893
+
894
+ return attn_output, attn_weights, past_key_value
895
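+ # Illustrative shape walk-through for the eager path above, under assumed sizes
+ # (bsz=2, n_latents=64, context seq=576, hidden_size=4096, 32 query heads, 8 key/value heads, head_dim=128):
+ #   latents (2, 64, 4096)                      -> query_states (2, 32, 64, 128)
+ #   concat([context, latents]) (2, 640, 4096)  -> key/value_states (2, 8, 640, 128), repeated to 32 heads
+ #   attn_weights (2, 32, 64, 640)              -> attn_output (2, 64, 4096) after o_proj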
+
896
+
897
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with MistralAttention->Idefics2PerceiverAttention,MistralFlashAttention->Idefics2PerceiverFlashAttention,Mistral->Idefics2
898
+ class Idefics2PerceiverFlashAttention2(Idefics2PerceiverAttention):
899
+ """
900
+ Idefics2 flash attention module. This module inherits from `Idefics2PerceiverAttention` as the weights of the module stay
901
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
902
+ flash attention and deal with padding tokens in case the input contains any of them.
903
+ """
904
+
905
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
906
+ def __init__(self, *args, **kwargs):
907
+ super().__init__(*args, **kwargs)
908
+
909
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
910
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
911
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
912
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
913
+
914
+ # Ignore copy
915
+ def forward(
916
+ self,
917
+ latents: torch.Tensor,
918
+ context: torch.Tensor,
919
+ attention_mask: Optional[torch.LongTensor] = None,
920
+ position_ids: Optional[torch.LongTensor] = None,
921
+ past_key_value: Optional[Cache] = None,
922
+ output_attentions: bool = False,
923
+ use_cache: bool = False,
924
+ **kwargs,
925
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
926
+ bsz, q_len, _ = latents.size()
927
+ kv_seq_len = q_len + context.size()[1]
928
+
929
+ # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
930
+ # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
931
+ query_states = self.q_proj(latents)
932
+ key_states = self.k_proj(torch.cat([context, latents], dim=-2))
933
+ value_states = self.v_proj(torch.cat([context, latents], dim=-2))
934
+
935
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
936
+ key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
937
+ value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
938
+
939
+ kv_seq_len = key_states.shape[-2]
940
+ if past_key_value is not None:
941
+ kv_seq_len += past_key_value[0].shape[-2]
942
+
943
+ if past_key_value is not None:
944
+ # Activate slicing cache only if the config has a `sliding_window` attribute
945
+ if hasattr(self.config, "sliding_window") and kv_seq_len > self.config.sliding_window:
946
+ slicing_tokens = kv_seq_len - self.config.sliding_window
947
+
948
+ past_key = past_key_value[0]
949
+ past_value = past_key_value[1]
950
+
951
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
952
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
953
+
954
+ if past_key.shape[-2] != self.config.sliding_window - 1:
955
+ raise ValueError(
956
+ "past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1,"
957
+ f" head_dim`), got {past_key.shape}"
958
+ )
959
+
960
+ past_key_value = (past_key, past_value)
961
+
962
+ if attention_mask is not None:
963
+ attention_mask = attention_mask[:, slicing_tokens:]
964
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
965
+
966
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
967
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
968
+
969
+ past_key_value = (key_states, value_states) if use_cache else None
970
+
971
+ # repeat k/v heads if n_kv_heads < n_heads
972
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
973
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
974
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
975
+
976
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
977
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
978
+ # cast them back in float16 just to be sure everything works as expected.
979
+ input_dtype = query_states.dtype
980
+ if input_dtype == torch.float32:
981
+ if torch.is_autocast_enabled():
982
+ target_dtype = torch.get_autocast_gpu_dtype()
983
+ # Handle the case where the model is quantized
984
+ elif hasattr(self.config, "_pre_quantization_dtype"):
985
+ target_dtype = self.config._pre_quantization_dtype
986
+ else:
987
+ target_dtype = self.q_proj.weight.dtype
988
+
989
+ logger.warning_once(
990
+ f"The input hidden states seem to be silently cast to float32, this might be related to"
991
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
992
+ f" {target_dtype}."
993
+ )
994
+
995
+ query_states = query_states.to(target_dtype)
996
+ key_states = key_states.to(target_dtype)
997
+ value_states = value_states.to(target_dtype)
998
+
999
+ # Reshape to the expected shape for Flash Attention
1000
+ query_states = query_states.transpose(1, 2)
1001
+ key_states = key_states.transpose(1, 2)
1002
+ value_states = value_states.transpose(1, 2)
1003
+
1004
+ attn_output = self._flash_attention_forward(
1005
+ query_states,
1006
+ key_states,
1007
+ value_states,
1008
+ attention_mask,
1009
+ q_len,
1010
+ dropout=dropout_rate,
1011
+ use_sliding_windows=False,
1012
+ )
1013
+
1014
+ attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
1015
+ attn_output = self.o_proj(attn_output)
1016
+
1017
+ if not output_attentions:
1018
+ attn_weights = None
1019
+
1020
+ return attn_output, attn_weights, past_key_value
1021
+
1022
+ def _flash_attention_forward(
1023
+ self,
1024
+ query_states,
1025
+ key_states,
1026
+ value_states,
1027
+ attention_mask,
1028
+ query_length,
1029
+ dropout=0.0,
1030
+ softmax_scale=None,
1031
+ use_sliding_windows=False,
1032
+ ):
1033
+ """
1034
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
1035
+ first unpads the input, then computes the attention scores and pads the final attention scores.
1036
+
1037
+ Args:
1038
+ query_states (`torch.Tensor`):
1039
+ Input query states to be passed to Flash Attention API
1040
+ key_states (`torch.Tensor`):
1041
+ Input key states to be passed to Flash Attention API
1042
+ value_states (`torch.Tensor`):
1043
+ Input value states to be passed to Flash Attention API
1044
+ attention_mask (`torch.Tensor`):
1045
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
1046
+ position of padding tokens and 1 for the position of non-padding tokens.
1047
+ dropout (`float`):
1048
+ Attention dropout
1049
+ softmax_scale (`float`, *optional*):
1050
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
1051
+ use_sliding_windows (`bool`, *optional*):
1052
+ Whether to activate sliding window attention.
1053
+ """
1054
+ if not self._flash_attn_uses_top_left_mask:
1055
+ causal = self.is_causal
1056
+ else:
1057
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
1058
+ causal = self.is_causal and query_length != 1
1059
+
1060
+ # Contains at least one padding token in the sequence
1061
+ if attention_mask is not None:
1062
+ batch_size = query_states.shape[0]
1063
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
1064
+ query_states, key_states, value_states, attention_mask, query_length
1065
+ )
1066
+
1067
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
1068
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
1069
+
1070
+ if not use_sliding_windows:
1071
+ attn_output_unpad = flash_attn_varlen_func(
1072
+ query_states,
1073
+ key_states,
1074
+ value_states,
1075
+ cu_seqlens_q=cu_seqlens_q,
1076
+ cu_seqlens_k=cu_seqlens_k,
1077
+ max_seqlen_q=max_seqlen_in_batch_q,
1078
+ max_seqlen_k=max_seqlen_in_batch_k,
1079
+ dropout_p=dropout,
1080
+ softmax_scale=softmax_scale,
1081
+ causal=causal,
1082
+ )
1083
+ else:
1084
+ attn_output_unpad = flash_attn_varlen_func(
1085
+ query_states,
1086
+ key_states,
1087
+ value_states,
1088
+ cu_seqlens_q=cu_seqlens_q,
1089
+ cu_seqlens_k=cu_seqlens_k,
1090
+ max_seqlen_q=max_seqlen_in_batch_q,
1091
+ max_seqlen_k=max_seqlen_in_batch_k,
1092
+ dropout_p=dropout,
1093
+ softmax_scale=softmax_scale,
1094
+ causal=causal,
1095
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1096
+ )
1097
+
1098
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
1099
+ else:
1100
+ if not use_sliding_windows:
1101
+ attn_output = flash_attn_func(
1102
+ query_states,
1103
+ key_states,
1104
+ value_states,
1105
+ dropout,
1106
+ softmax_scale=softmax_scale,
1107
+ causal=causal,
1108
+ )
1109
+ else:
1110
+ attn_output = flash_attn_func(
1111
+ query_states,
1112
+ key_states,
1113
+ value_states,
1114
+ dropout,
1115
+ softmax_scale=softmax_scale,
1116
+ causal=causal,
1117
+ window_size=(self.config.sliding_window, self.config.sliding_window),
1118
+ )
1119
+
1120
+ return attn_output
1121
+
1122
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
1123
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
1124
+
1125
+ # On the first iteration we need to properly re-create the padding mask
1126
+ # by slicing it at the proper place
1127
+ if kv_seq_len != attention_mask.shape[-1]:
1128
+ attention_mask_num_tokens = attention_mask.shape[-1]
1129
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
1130
+
1131
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
1132
+
1133
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1134
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
1135
+
1136
+ if query_length == kv_seq_len:
1137
+ query_layer = index_first_axis(
1138
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
1139
+ )
1140
+ cu_seqlens_q = cu_seqlens_k
1141
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
1142
+ indices_q = indices_k
1143
+ elif query_length == 1:
1144
+ max_seqlen_in_batch_q = 1
1145
+ cu_seqlens_q = torch.arange(
1146
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
1147
+ ) # There is a memcpy here, that is very bad.
1148
+ indices_q = cu_seqlens_q[:-1]
1149
+ query_layer = query_layer.squeeze(1)
1150
+ else:
1151
+ # The -q_len: slice assumes left padding.
1152
+ attention_mask = attention_mask[:, -query_length:]
1153
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
1154
+
1155
+ return (
1156
+ query_layer,
1157
+ key_layer,
1158
+ value_layer,
1159
+ indices_q,
1160
+ (cu_seqlens_q, cu_seqlens_k),
1161
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
1162
+ )
1163
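+ # Unpadding sketch for the helper above, assuming a hypothetical left-padded batch of two sequences:
+ #   attention_mask = [[0, 1, 1],
+ #                     [1, 1, 1]]   # per-sequence lengths: [2, 3]
+ #   _get_unpad_data yields indices_k (positions of the ones in the flattened mask),
+ #   cu_seqlens_k = [0, 2, 5] and max_seqlen_in_batch_k = 3, which are exactly the arguments
+ #   flash_attn_varlen_func expects for variable-length attention without padding tokens.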
+
1164
+
1165
+ IDEFICS2_PERCEIVER_ATTENTION_CLASSES = {
1166
+ "eager": Idefics2PerceiverAttention,
1167
+ "flash_attention_2": Idefics2PerceiverFlashAttention2,
1168
+ }
1169
+
1170
+
1171
+ class Idefics2PerceiverLayer(nn.Module):
1172
+ def __init__(self, config, layer_idx: int):
1173
+ super().__init__()
1174
+ self.hidden_size = config.text_config.hidden_size
1175
+ self.n_latents = config.perceiver_config.resampler_n_latents
1176
+ self.depth = config.perceiver_config.resampler_depth
1177
+ self.rms_norm_eps = config.text_config.rms_norm_eps
1178
+
1179
+ self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1180
+ self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1181
+ self.self_attn = IDEFICS2_PERCEIVER_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
1182
+ self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1183
+ self.mlp = Idefics2MLP(
1184
+ hidden_size=config.text_config.hidden_size,
1185
+ intermediate_size=config.text_config.hidden_size * 4,
1186
+ output_size=config.text_config.hidden_size,
1187
+ hidden_act=config.perceiver_config.hidden_act,
1188
+ )
1189
+
1190
+ def forward(
1191
+ self,
1192
+ latents: torch.Tensor,
1193
+ context: torch.Tensor,
1194
+ attention_mask: Optional[torch.Tensor] = None,
1195
+ position_ids: Optional[torch.LongTensor] = None,
1196
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1197
+ output_attentions: Optional[bool] = False,
1198
+ use_cache: Optional[bool] = False,
1199
+ **kwargs,
1200
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
1201
+ """
1202
+ Args:
1203
+ latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1204
+ context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
1205
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
1206
+ `(batch, sequence_length)` where padding elements are indicated by 0.
1207
+ output_attentions (`bool`, *optional*):
1208
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
1209
+ returned tensors for more detail.
1210
+ use_cache (`bool`, *optional*):
1211
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
1212
+ (see `past_key_values`).
1213
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
1214
+ """
1215
+ residual = latents
1216
+
1217
+ latents = self.input_latents_norm(latents)
1218
+ context = self.input_context_norm(context)
1219
+
1220
+ latents, self_attn_weights, present_key_value = self.self_attn(
1221
+ latents=latents,
1222
+ context=context,
1223
+ attention_mask=attention_mask,
1224
+ )
1225
+ latents = residual + latents
1226
+ residual = latents
1227
+
1228
+ latents = self.post_attention_layernorm(latents)
1229
+ latents = self.mlp(latents)
1230
+ latents = residual + latents
1231
+
1232
+ outputs = (latents,)
1233
+
1234
+ if output_attentions:
1235
+ outputs += (self_attn_weights,)
1236
+
1237
+ if use_cache:
1238
+ outputs += (present_key_value,)
1239
+
1240
+ return outputs
1241
+
1242
+
1243
+ class Idefics2PerceiverResampler(nn.Module):
1244
+ def __init__(self, config) -> None:
1245
+ """
1246
+ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
1247
+ MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed set of `n_latents` inputs, then
1248
+ returns a Tensor of shape [bsz, n_latents, embed_dim]. The Resampler acts as a form of learned pooling and
1249
+ is derived from [Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206).
1250
+ """
1251
+ super().__init__()
1252
+ self.hidden_size = config.text_config.hidden_size
1253
+ self.hidden_act = config.perceiver_config.hidden_act
1254
+ self.n_latents = config.perceiver_config.resampler_n_latents
1255
+ self.depth = config.perceiver_config.resampler_depth
1256
+ self.rms_norm_eps = config.text_config.rms_norm_eps
1257
+
1258
+ # Create Latents for Perceiver
1259
+ self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size))
1260
+
1261
+ # Create Transformer Blocks
1262
+ self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)])
1263
+ self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps)
1264
+
1265
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1266
+
1267
+ def forward(
1268
+ self,
1269
+ context: torch.Tensor,
1270
+ attention_mask,
1271
+ ) -> torch.Tensor:
1272
+ # seq embed -> bsz seq embed
1273
+ latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size()))
1274
+
1275
+ latent_attention_mask = torch.ones(
1276
+ (attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device
1277
+ )
1278
+ attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1)
1279
+ attention_mask = (
1280
+ _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents)
1281
+ if not self._use_flash_attention_2
1282
+ else attention_mask
1283
+ )
1284
+
1285
+ compressed_context = latents
1286
+ for perceiver_layer in self.layers:
1287
+ layer_outputs = perceiver_layer(
1288
+ compressed_context,
1289
+ context,
1290
+ attention_mask=attention_mask,
1291
+ position_ids=None,
1292
+ past_key_value=None,
1293
+ output_attentions=False,
1294
+ use_cache=False,
1295
+ )
1296
+
1297
+ compressed_context = layer_outputs[0]
1298
+
1299
+ compressed_context = self.norm(compressed_context)
1300
+
1301
+ return compressed_context
1302
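+ # Minimal usage sketch for the resampler, under assumed sizes (resampler_n_latents=64); `context` is
+ # expected to already be projected to the text hidden size (see Idefics2Connector below):
+ #   context = projected_image_features                                   # (bsz, num_patches, hidden_size)
+ #   mask = torch.ones(context.shape[:2], dtype=torch.long, device=context.device)
+ #   pooled = resampler(context, mask)                                    # (bsz, 64, hidden_size)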
+
1303
+
1304
+ class Idefics2Connector(nn.Module):
1305
+ def __init__(self, config):
1306
+ super().__init__()
1307
+ self.modality_projection = Idefics2MLP(
1308
+ hidden_size=config.vision_config.hidden_size,
1309
+ intermediate_size=config.text_config.intermediate_size,
1310
+ output_size=config.text_config.hidden_size,
1311
+ hidden_act=config.text_config.hidden_act,
1312
+ )
1313
+ self.perceiver_resampler = Idefics2PerceiverResampler(config)
1314
+
1315
+ def forward(self, image_hidden_states, attention_mask):
1316
+ image_hidden_states = self.modality_projection(image_hidden_states)
1317
+ image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask)
1318
+ return image_hidden_states
1319
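+ # Connector data flow: per-image patch features of shape (num_images, num_patches, vision hidden size)
+ # are first projected by the MLP to the text hidden size, then pooled by the perceiver resampler down
+ # to (num_images, resampler_n_latents, text hidden size).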
+
1320
+
1321
+ IDEFICS2_START_DOCSTRING = r"""
1322
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1323
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1324
+ etc.)
1325
+
1326
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1327
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
1328
+ and behavior.
1329
+
1330
+ Parameters:
1331
+ config ([`Idefics2Config`] or [`Idefics2VisionConfig`]):
1332
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1333
+ load the weights associated with the model, only the configuration. Check out the
1334
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1335
+ """
1336
+
1337
+
1338
+ @add_start_docstrings(
1339
+ "The bare Idefics2 Model outputting raw hidden-states without any specific head on top.",
1340
+ IDEFICS2_START_DOCSTRING,
1341
+ )
1342
+ class Idefics2PreTrainedModel(PreTrainedModel):
1343
+ config_class = Idefics2Config
1344
+ base_model_prefix = "model"
1345
+ supports_gradient_checkpointing = True
1346
+ _no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"]
1347
+ _skip_keys_device_placement = "past_key_values"
1348
+ _supports_flash_attn_2 = True
1349
+
1350
+ def _init_weights(self, module):
1351
+ # important: this ported version of Idefics2 isn't meant for training from scratch - only
1352
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
1353
+ # should serve for that purpose
1354
+ std = (
1355
+ self.config.initializer_range
1356
+ if hasattr(self.config, "initializer_range")
1357
+ else self.config.text_config.initializer_range
1358
+ )
1359
+
1360
+ if hasattr(module, "class_embedding"):
1361
+ module.class_embedding.data.normal_(mean=0.0, std=std)
1362
+
1363
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
1364
+ module.weight.data.normal_(mean=0.0, std=std)
1365
+ if module.bias is not None:
1366
+ module.bias.data.zero_()
1367
+ elif isinstance(module, nn.Embedding):
1368
+ module.weight.data.normal_(mean=0.0, std=std)
1369
+ if module.padding_idx is not None:
1370
+ module.weight.data[module.padding_idx].zero_()
1371
+
1372
+ @classmethod
1373
+ def _autoset_attn_implementation(
1374
+ cls,
1375
+ config,
1376
+ use_flash_attention_2: bool = False,
1377
+ torch_dtype: Optional[torch.dtype] = None,
1378
+ device_map: Optional[Union[str, Dict[str, int]]] = None,
1379
+ check_device_map: bool = True,
1380
+ **kwargs,
1381
+ ):
1382
+ """
1383
+ Overrides the method in `PreTrainedModel` to update the vision config with the correct attention implementation
1384
+ """
1385
+ config = super()._autoset_attn_implementation(
1386
+ config=config,
1387
+ use_flash_attention_2=use_flash_attention_2,
1388
+ torch_dtype=torch_dtype,
1389
+ device_map=device_map,
1390
+ check_device_map=check_device_map,
1391
+ **kwargs,
1392
+ )
1393
+ config.vision_config._attn_implementation = config._attn_implementation
1394
+ return config
1395
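+ # Loading sketch showing the effect of the override above (requires the optional flash-attn package;
+ # the assertion follows from the propagation to the vision config done in _autoset_attn_implementation):
+ #   model = Idefics2Model.from_pretrained(
+ #       "HuggingFaceM4/idefics2-8b", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
+ #   )
+ #   assert model.config.vision_config._attn_implementation == "flash_attention_2"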
+
1396
+
1397
+ IDEFICS2_INPUTS_DOCSTRING = r"""
1398
+ Args:
1399
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1400
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1401
+ it.
1402
+
1403
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1404
+ [`PreTrainedTokenizer.__call__`] for details.
1405
+
1406
+ [What are input IDs?](../glossary#input-ids)
1407
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1408
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1409
+
1410
+ - 1 for tokens that are **not masked**,
1411
+ - 0 for tokens that are **masked**.
1412
+
1413
+ [What are attention masks?](../glossary#attention-mask)
1414
+
1415
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1416
+ [`PreTrainedTokenizer.__call__`] for details.
1417
+
1418
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1419
+ `past_key_values`).
1420
+
1421
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1422
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1423
+ information on the default strategy.
1424
+
1425
+ - 1 indicates the head is **not masked**,
1426
+ - 0 indicates the head is **masked**.
1427
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1428
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1429
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
1430
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1431
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
1432
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
1433
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1434
+
1435
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1436
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1437
+
1438
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1439
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1440
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1441
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1442
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1443
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1444
+ model's internal embedding lookup matrix.
1445
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1446
+ The tensors corresponding to the input images. Pixel values can be obtained using
1447
+ [`AutoImageProcessor`]. See [`Idefics2ImageProcessor.__call__`] for details ([`Idefics2Processor`] uses
1448
+ [`Idefics2ImageProcessor`] for processing images).
1449
+ pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
1450
+ Mask to avoid performing attention on padding pixel indices.
1451
+ image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1452
+ The hidden states of the image encoder after modality projection and perceiver resampling.
1453
+ use_cache (`bool`, *optional*):
1454
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1455
+ `past_key_values`).
1456
+ output_attentions (`bool`, *optional*):
1457
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1458
+ tensors for more detail.
1459
+ output_hidden_states (`bool`, *optional*):
1460
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1461
+ more detail.
1462
+ return_dict (`bool`, *optional*):
1463
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1464
+ """
1465
+
1466
+
1467
+ @add_start_docstrings(
1468
+ """Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder""",
1469
+ IDEFICS2_START_DOCSTRING,
1470
+ )
1471
+ class Idefics2Model(Idefics2PreTrainedModel):
1472
+ def __init__(self, config: Idefics2Config):
1473
+ super().__init__(config)
1474
+ self.padding_idx = self.config.text_config.pad_token_id
1475
+ self.vocab_size = self.config.text_config.vocab_size
1476
+
1477
+ self.vision_model = Idefics2VisionTransformer(config.vision_config)
1478
+ self.connector = Idefics2Connector(config)
1479
+ self.text_model = AutoModel.from_config(config.text_config)
1480
+
1481
+ self.image_seq_len = config.perceiver_config.resampler_n_latents
1482
+ self.image_token_id = self.config.image_token_id
1483
+
1484
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1485
+
1486
+ self.post_init()
1487
+
1488
+ def enable_input_require_grads(self):
1489
+ """
1490
+ Enables the gradients for the input embeddings.
1491
+
1492
+ This is useful for lora when using gradient checkpointing.
1493
+ c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
1494
+
1495
+ Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
1496
+ """
1497
+
1498
+ def get_lowest_module(module):
1499
+ if len(list(module.children())) == 0:
1500
+ # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.)
1501
+ return module
1502
+ else:
1503
+ # Recursively call the function on each child module
1504
+ return get_lowest_module(list(module.children())[0])
1505
+
1506
+ def make_inputs_require_grads(module, input, output):
1507
+ output.requires_grad_(True)
1508
+
1509
+ self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
1510
+ self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(
1511
+ make_inputs_require_grads
1512
+ )
1513
+
1514
+ def get_input_embeddings(self):
1515
+ return self.text_model.get_input_embeddings()
1516
+
1517
+ def set_input_embeddings(self, value):
1518
+ self.text_model.set_input_embeddings(value)
1519
+
1520
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
1521
+ model_embeds = self.text_model.resize_token_embeddings(
1522
+ new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
1523
+ )
1524
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
1525
+ return model_embeds
1526
+
1527
+ def inputs_merger(
1528
+ self,
1529
+ input_ids: torch.LongTensor,
1530
+ inputs_embeds: Optional[torch.Tensor],
1531
+ image_hidden_states: Optional[torch.Tensor],
1532
+ ):
1533
+ """
1534
+ This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
1535
+ The merging happens as follows:
1536
+ - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
1537
+ - We get the image hidden states for the image through the vision encoder (and potentially the perceiver), and that hidden state is then projected into the text embedding space.
1538
+ We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
1539
+ - The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_tok_around_image vector_tok_4`. That sequence is fed to the LM.
1540
+ - To fit the format of that sequence, `input_ids`, `inputs_embeds` and `attention_mask` are all adapted to insert the image hidden states.
1541
+ """
1542
+ num_images, _, vision_hidden_size = image_hidden_states.shape
1543
+ special_image_token_mask = input_ids == self.image_token_id
1544
+ new_inputs_embeds = inputs_embeds.clone()
1545
+ reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size)
1546
+ new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states
1547
+ return new_inputs_embeds
1548
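+ # Illustrative merge, assuming image_seq_len=2 and a single image in the prompt:
+ #   input_ids:                 [tok_1, <fake>, <image>, <image>, <fake>, tok_2]
+ #   special_image_token_mask:  [False, False,  True,    True,    False,  False]
+ #   The two rows of inputs_embeds at the <image> positions are overwritten with the two
+ #   resampled image hidden states (image_hidden_states viewed as (-1, vision_hidden_size)).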
+
1549
+ @add_start_docstrings_to_model_forward(
1550
+ """
1551
+ Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
1552
+ the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
1553
+ max_num_images is the maximum number of images among the batch_size samples in the batch.
1554
+
1555
+ Padding images are not needed beyond padding the pixel_values at the entrance of the model.
1556
+ For efficiency, we only pass through the vision_model's forward the real images by
1557
+ discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
1558
+ image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
1559
+ """,
1560
+ IDEFICS2_INPUTS_DOCSTRING,
1561
+ )
1562
+ def forward(
1563
+ self,
1564
+ input_ids: torch.LongTensor = None,
1565
+ attention_mask: Optional[torch.Tensor] = None,
1566
+ position_ids: Optional[torch.LongTensor] = None,
1567
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1568
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1569
+ pixel_values: Optional[torch.FloatTensor] = None,
1570
+ pixel_attention_mask: Optional[torch.BoolTensor] = None,
1571
+ image_hidden_states: Optional[torch.FloatTensor] = None,
1572
+ use_cache: Optional[bool] = None,
1573
+ output_attentions: Optional[bool] = None,
1574
+ output_hidden_states: Optional[bool] = None,
1575
+ return_dict: Optional[bool] = None,
1576
+ ) -> Union[Tuple, Idefics2BaseModelOutputWithPast]:
1577
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1578
+ output_hidden_states = (
1579
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1580
+ )
1581
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1582
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1583
+
1584
+ # retrieve input_ids and inputs_embeds
1585
+ if input_ids is not None:
1586
+ batch_size, seq_length = input_ids.shape
1587
+ elif inputs_embeds is not None:
1588
+ batch_size, seq_length, _ = inputs_embeds.shape
1589
+ else:
1590
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1591
+
1592
+ past_seen_tokens = 0
1593
+ if use_cache:
1594
+ if not isinstance(past_key_values, Cache):
1595
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1596
+ past_seen_tokens = past_key_values.get_usable_length(seq_length)
1597
+
1598
+ if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0:
1599
+ raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")
1600
+
1601
+ if inputs_embeds is None:
1602
+ inputs_embeds = self.text_model.get_input_embeddings()(input_ids)
1603
+
1604
+ # START VISUAL INPUTS INTEGRATION
1605
+ if pixel_values is not None and image_hidden_states is not None:
1606
+ raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
1607
+ elif pixel_values is not None:
1608
+ batch_size, num_images, num_channels, height, width = pixel_values.shape
1609
+ pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility
1610
+ pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
1611
+
1612
+ # Remove padding images - padding images are all zeros.
1613
+ nb_values_per_image = pixel_values.shape[1:].numel()
1614
+ real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
1615
+ pixel_values = pixel_values[real_images_inds].contiguous()
1616
+
1617
+ # Handle the vision attention mask
1618
+ if pixel_attention_mask is None:
1619
+ pixel_attention_mask = torch.ones(
1620
+ size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)),
1621
+ dtype=torch.bool,
1622
+ device=pixel_values.device,
1623
+ )
1624
+ else:
1625
+ # Remove padding images from the mask
1626
+ pixel_attention_mask = pixel_attention_mask.view(
1627
+ batch_size * num_images, *pixel_attention_mask.shape[2:]
1628
+ )
1629
+ pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
1630
+
1631
+ patch_size = self.config.vision_config.patch_size
1632
+ patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
1633
+ patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
1634
+ patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
1635
+
1636
+ # Get sequence from the vision encoder
1637
+ image_hidden_states = self.vision_model(
1638
+ pixel_values=pixel_values,
1639
+ patch_attention_mask=patch_attention_mask,
1640
+ ).last_hidden_state
1641
+
1642
+ # Modality projection & resampling
1643
+ image_hidden_states = self.connector(
1644
+ image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1)
1645
+ )
1646
+
1647
+ elif image_hidden_states is not None:
1648
+ image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
1649
+
1650
+ if past_seen_tokens == 0 and inputs_embeds is not None and image_hidden_states is not None:
1651
+ # When we generate, we don't want to replace the potential image_token_id that we generated by images
1652
+ # that simply don't exist
1653
+ inputs_embeds = self.inputs_merger(
1654
+ input_ids=input_ids,
1655
+ inputs_embeds=inputs_embeds,
1656
+ image_hidden_states=image_hidden_states,
1657
+ )
1658
+
1659
+ outputs = self.text_model(
1660
+ inputs_embeds=inputs_embeds,
1661
+ attention_mask=attention_mask,
1662
+ position_ids=position_ids,
1663
+ past_key_values=past_key_values,
1664
+ output_attentions=output_attentions,
1665
+ output_hidden_states=output_hidden_states,
1666
+ return_dict=return_dict,
1667
+ )
1668
+
1669
+ if not return_dict:
1670
+ return tuple(v for v in [*outputs, image_hidden_states] if v is not None)
1671
+
1672
+ return Idefics2BaseModelOutputWithPast(
1673
+ last_hidden_state=outputs.last_hidden_state,
1674
+ past_key_values=outputs.past_key_values,
1675
+ hidden_states=outputs.hidden_states,
1676
+ attentions=outputs.attentions,
1677
+ image_hidden_states=image_hidden_states,
1678
+ )
1679
+
1680
+
1681
+ @add_start_docstrings(
1682
+ """The Idefics2 Model with a language modeling head. It is made up of a SigLIP vision encoder and a Mistral language decoder, with a language modeling head on top. """,
1683
+ IDEFICS2_START_DOCSTRING,
1684
+ )
1685
+ class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel):
1686
+ _tied_weights_keys = ["lm_head.weight"]
1687
+
1688
+ def __init__(self, config):
1689
+ super().__init__(config)
1690
+ self.model = Idefics2Model(config)
1691
+ self.image_token_id = self.config.image_token_id
1692
+
1693
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
1694
+ self.vocab_size = config.text_config.vocab_size
1695
+
1696
+ # Initialize weights and apply final processing
1697
+ self.post_init()
1698
+
1699
+ def enable_input_require_grads(self):
1700
+ """
1701
+ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
1702
+ the model weights fixed.
1703
+ """
1704
+
1705
+ def make_inputs_require_grads(module, input, output):
1706
+ output.requires_grad_(True)
1707
+
1708
+ self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
1709
+ self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(
1710
+ make_inputs_require_grads
1711
+ )
1712
+
1713
+ def get_input_embeddings(self):
1714
+ return self.model.text_model.get_input_embeddings()
1715
+
1716
+ def set_input_embeddings(self, value):
1717
+ self.model.text_model.set_input_embeddings(value)
1718
+
1719
+ def get_output_embeddings(self):
1720
+ return self.lm_head
1721
+
1722
+ def set_output_embeddings(self, new_embeddings):
1723
+ self.lm_head = new_embeddings
1724
+
1725
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
1726
+ # model_embeds = self.model.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of)
1727
+ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1728
+ if new_num_tokens is None and pad_to_multiple_of is None:
1729
+ return model_embeds
1730
+
1731
+ # Update base model and current model config
1732
+ # Ignore copy
1733
+ self.config.text_config.vocab_size = model_embeds.weight.shape[0]
1734
+ self.vocab_size = self.config.text_config.vocab_size
1735
+
1736
+ # Tie weights again if needed
1737
+ self.tie_weights()
1738
+
1739
+ return model_embeds
1740
+
1741
+ def tie_weights(self):
1742
+ """
1743
+ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
1744
+ """
1745
+ output_embeddings = self.get_output_embeddings()
1746
+ input_embeddings = self.get_input_embeddings()
1747
+
1748
+ if getattr(self.config, "tie_word_embeddings", True):
1749
+ output_embeddings.weight = input_embeddings.weight
1750
+
1751
+ @add_start_docstrings_to_model_forward(IDEFICS2_INPUTS_DOCSTRING)
1752
+ @replace_return_docstrings(output_type=Idefics2CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1753
+ def forward(
1754
+ self,
1755
+ input_ids: torch.LongTensor = None,
1756
+ attention_mask: Optional[torch.Tensor] = None,
1757
+ position_ids: Optional[torch.LongTensor] = None,
1758
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1759
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1760
+ pixel_values: Optional[torch.FloatTensor] = None,
1761
+ pixel_attention_mask: Optional[torch.BoolTensor] = None,
1762
+ image_hidden_states: Optional[torch.FloatTensor] = None,
1763
+ labels: Optional[torch.LongTensor] = None,
1764
+ use_cache: Optional[bool] = None,
1765
+ output_attentions: Optional[bool] = None,
1766
+ output_hidden_states: Optional[bool] = None,
1767
+ return_dict: Optional[bool] = None,
1768
+ ) -> Union[Tuple, Idefics2CausalLMOutputWithPast]:
1769
+ r"""
1770
+ Args:
1771
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1772
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1773
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1774
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1775
+
1776
+ Returns:
1777
+
1778
+ Example:
1779
+
1780
+ ```python
1781
+ >>> import requests
1782
+ >>> import torch
1783
+ >>> from PIL import Image
1784
+ >>> from io import BytesIO
1785
+
1786
+ >>> from transformers import AutoProcessor, AutoModelForVision2Seq
1787
+ >>> from transformers.image_utils import load_image
1788
+
1789
+ >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
1790
+ >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
1791
+ >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
1792
+ >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
1793
+
1794
+ >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
1795
+ >>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b-base", device_map="auto")
1796
+
1797
+ >>> BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
1798
+ >>> EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
1799
+
1800
+ >>> # Create inputs
1801
+ >>> prompts = [
1802
+ ... "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
1803
+ ... "In which city is that bridge located?<image>",
1804
+ ... ]
1805
+ >>> images = [[image1, image2], [image3]]
1806
+ >>> inputs = processor(text=prompts, padding=True, return_tensors="pt").to("cuda")
1807
+
1808
+ >>> # Generate
1809
+ >>> generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=20)
1810
+ >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
1811
+
1812
+ >>> print(generated_texts)
1813
+ ['In this image, we can see the city of New York, and more specifically the Statue of Liberty. In this image, we can see the city of New York, and more specifically the Statue of Liberty.\n\n', 'In which city is that bridge located?\n\nThe bridge is located in the city of Pittsburgh, Pennsylvania.\n\n\nThe bridge is']
1814
+ ```"""
1815
+
1816
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1817
+ output_hidden_states = (
1818
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1819
+ )
1820
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1821
+
1822
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1823
+ outputs = self.model(
1824
+ input_ids=input_ids,
1825
+ attention_mask=attention_mask,
1826
+ position_ids=position_ids,
1827
+ past_key_values=past_key_values,
1828
+ inputs_embeds=inputs_embeds,
1829
+ pixel_values=pixel_values,
1830
+ pixel_attention_mask=pixel_attention_mask,
1831
+ image_hidden_states=image_hidden_states,
1832
+ use_cache=use_cache,
1833
+ output_attentions=output_attentions,
1834
+ output_hidden_states=output_hidden_states,
1835
+ return_dict=return_dict,
1836
+ )
1837
+
1838
+ hidden_states = outputs[0]
1839
+ logits = self.lm_head(hidden_states)
1840
+ logits = logits.float()
1841
+
1842
+ loss = None
1843
+ if labels is not None:
1844
+ labels = labels.to(logits.device)
1845
+ # Shift so that tokens < n predict n
1846
+ if attention_mask is not None:
1847
+ shift_attention_mask = attention_mask[..., 1:].to(logits.device)
1848
+ shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
1849
+ shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
1850
+ else:
1851
+ shift_logits = logits[..., :-1, :].contiguous()
1852
+ shift_labels = labels[..., 1:].contiguous()
1853
+ # Flatten the tokens
1854
+ loss_fct = CrossEntropyLoss(ignore_index=self.image_token_id)
1855
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1856
+
1857
+ if not return_dict:
1858
+ output = (logits,) + outputs[1:]
1859
+ return (loss,) + output if loss is not None else output
1860
+
1861
+ return Idefics2CausalLMOutputWithPast(
1862
+ loss=loss,
1863
+ logits=logits,
1864
+ past_key_values=outputs.past_key_values,
1865
+ hidden_states=outputs.hidden_states,
1866
+ attentions=outputs.attentions,
1867
+ image_hidden_states=outputs.image_hidden_states,
1868
+ )
1869
+
1870
+ def prepare_inputs_for_generation(
1871
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1872
+ ):
1873
+ # Omit tokens covered by past_key_values
1874
+ if past_key_values is not None:
1875
+ if isinstance(past_key_values, Cache):
1876
+ cache_length = past_key_values.get_seq_length()
1877
+ past_length = past_key_values.seen_tokens
1878
+ max_cache_length = past_key_values.get_max_length()
1879
+ else:
1880
+ cache_length = past_length = past_key_values[0][0].shape[2]
1881
+ max_cache_length = None
1882
+
1883
+ # Keep only the unprocessed tokens:
1884
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1885
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1886
+ # input)
1887
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1888
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1889
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1890
+ # input_ids based on the past_length.
1891
+ elif past_length < input_ids.shape[1]:
1892
+ input_ids = input_ids[:, past_length:]
1893
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1894
+
1895
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1896
+ if (
1897
+ max_cache_length is not None
1898
+ and attention_mask is not None
1899
+ and cache_length + input_ids.shape[1] > max_cache_length
1900
+ ):
1901
+ attention_mask = attention_mask[:, -max_cache_length:]
1902
+
1903
+ position_ids = kwargs.get("position_ids", None)
1904
+ if attention_mask is not None and position_ids is None:
1905
+ # create position_ids on the fly for batch generation
1906
+ position_ids = attention_mask.long().cumsum(-1) - 1
1907
+ position_ids.masked_fill_(attention_mask == 0, 1)
1908
+ if past_key_values:
1909
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1910
+
1911
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1912
+ if inputs_embeds is not None and past_key_values is None:
1913
+ model_inputs = {"inputs_embeds": inputs_embeds}
1914
+ else:
1915
+ model_inputs = {"input_ids": input_ids}
1916
+
1917
+ image_hidden_states = kwargs.get("image_hidden_states", None)
1918
+ if image_hidden_states is not None:
1919
+ pixel_values = None
1920
+ pixel_attention_mask = None
1921
+ else:
1922
+ pixel_values = kwargs.get("pixel_values", None)
1923
+ pixel_attention_mask = kwargs.get("pixel_attention_mask", None)
1924
+ model_inputs.update(
1925
+ {
1926
+ "position_ids": position_ids,
1927
+ "past_key_values": past_key_values,
1928
+ "use_cache": kwargs.get("use_cache"),
1929
+ "attention_mask": attention_mask,
1930
+ "pixel_values": pixel_values,
1931
+ "pixel_attention_mask": pixel_attention_mask,
1932
+ "image_hidden_states": image_hidden_states,
1933
+ }
1934
+ )
1935
+ return model_inputs
1936
+
1937
+ def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
1938
+ model_kwargs = super()._update_model_kwargs_for_generation(
1939
+ outputs=outputs,
1940
+ model_kwargs=model_kwargs,
1941
+ is_encoder_decoder=is_encoder_decoder,
1942
+ **kwargs,
1943
+ )
1944
+ # Get the precomputed image_hidden_states
1945
+ model_kwargs["image_hidden_states"] = outputs.image_hidden_states
1946
+ return model_kwargs
1947
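+ # During generation, the image_hidden_states computed on the first forward pass are carried over here
+ # through model_kwargs; prepare_inputs_for_generation then drops pixel_values, so the vision tower and
+ # the perceiver resampler run only once per generate call.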
+
1948
+ @staticmethod
1949
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
1950
+ def _reorder_cache(past_key_values, beam_idx):
1951
+ reordered_past = ()
1952
+ for layer_past in past_key_values:
1953
+ reordered_past += (
1954
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1955
+ )
1956
+ return reordered_past
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics2/processing_idefics2.py ADDED
@@ -0,0 +1,348 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for IDEFICS2.
17
+ """
18
+
19
+ from typing import TYPE_CHECKING, Dict, List, Optional, Union
20
+
21
+ from ...feature_extraction_utils import BatchFeature
22
+ from ...image_utils import ImageInput, is_valid_image, load_image
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
25
+ from ...utils import TensorType, logging
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ from ...pipelines.conversational import Conversation
30
+ from ...tokenization_utils_base import PreTokenizedInput
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ def is_url(val) -> bool:
37
+ return isinstance(val, str) and val.startswith("http")
38
+
39
+
40
+ def is_image_or_image_url(elem):
41
+ return is_url(elem) or is_valid_image(elem)
42
+
43
+
44
+ class Idefics2Processor(ProcessorMixin):
45
+ r"""
46
+ Constructs a IDEFICS2 processor which wraps a LLama tokenizer and IDEFICS2 image processor into a single processor.
47
+
48
+ [`IdeficsProcessor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
49
+ the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
50
+
51
+ Args:
52
+ image_processor (`Idefics2ImageProcessor`):
53
+ An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
54
+ tokenizer (`PreTrainedTokenizerBase`, *optional*):
55
+ An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
56
+ image_seq_len (`int`, *optional*, defaults to 64):
57
+ The length of the image sequence i.e. the number of <image> tokens per image in the input.
58
+ This parameter is used to build the string from the input prompt and image tokens and should match the
59
+ config.perceiver_config.resampler_n_latents value for the model used.
60
+ """
61
+
62
+ attributes = ["image_processor", "tokenizer"]
63
+ image_processor_class = "Idefics2ImageProcessor"
64
+ tokenizer_class = "AutoTokenizer"
65
+
66
+ def __init__(self, image_processor, tokenizer=None, image_seq_len: int = 64, **kwargs):
67
+ if image_processor is None:
68
+ raise ValueError("You need to specify an `image_processor`.")
69
+ if tokenizer is None:
70
+ raise ValueError("You need to specify a `tokenizer`.")
71
+
72
+ self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True)
73
+ self.image_token = AddedToken("<image>", normalized=False, special=True)
74
+ self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True)
75
+ self.image_seq_len = image_seq_len
76
+
77
+ tokens_to_add = {
78
+ "additional_special_tokens": [self.fake_image_token, self.image_token, self.end_of_utterance_token]
79
+ }
80
+ tokenizer.add_special_tokens(tokens_to_add)
81
+
82
+ # Stores a Jinja template that formats chat histories into tokenizable strings
83
+ self.chat_template = kwargs.pop("chat_template", None)
84
+
85
+ super().__init__(image_processor, tokenizer)
86
+
87
+ def _extract_images_from_prompts(self, prompts):
88
+ prompt_images = []
89
+ for prompt in prompts:
90
+ images = []
91
+ for elem in prompt:
92
+ if is_valid_image(elem):
93
+ images.append(elem)
94
+ elif is_url(elem):
95
+ images.append(load_image(elem))
96
+ prompt_images.append(images)
97
+ return prompt_images
98
+
99
+ def __call__(
100
+ self,
101
+ text: Union[TextInput, "PreTokenizedInput", List[TextInput], List["PreTokenizedInput"]] = None,
102
+ images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]] = None,
103
+ image_seq_len: Optional[int] = None,
104
+ padding: Union[bool, str, PaddingStrategy] = False,
105
+ truncation: Union[bool, str, TruncationStrategy] = None,
106
+ max_length: Optional[int] = None,
107
+ is_split_into_words: bool = False,
108
+ add_special_tokens: bool = True,
109
+ return_tensors: Optional[Union[str, TensorType]] = None,
110
+ ) -> BatchEncoding:
111
+ """
112
+ Processes the input prompts and returns a BatchEncoding.
113
+
114
+ Example:
115
+
116
+ ```python
117
+ >>> import requests
118
+ >>> from transformers import Idefics2Processor
119
+ >>> from transformers.image_utils import load_image
120
+
121
+ >>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
122
+ >>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
123
+
124
+ >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
125
+ >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
126
+
127
+ >>> image1, image2 = load_image(url1), load_image(url2)
128
+ >>> images = [[image1], [image2]]
129
+
130
+ >>> text = [
131
+ ... "<image>In this image, we see",
132
+ ... "bla bla bla<image>",
133
+ ... ]
134
+ >>> outputs = processor(text=text, images=images, return_tensors="pt", padding=True)
135
+ >>> input_ids = outputs.input_ids
136
+ >>> input_tokens = processor.tokenizer.batch_decode(input_ids)
137
+ >>> print(input_tokens)
138
+ ['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
139
+ ```
140
+
141
+ Args:
142
+ text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*):
143
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
144
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
145
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
146
+
147
+ Wherever an image token `<image>` is encountered, it is expanded to
148
+ `<fake_token_around_image>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
149
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
150
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
151
+ tensor. If it is of type `List[ImageInput]`, it is assumed to be for a single prompt, i.e. of batch size 1.
152
+ image_seq_len (`int`, *optional*):
153
+ The length of the image sequence. If not provided, the default value is used.
154
+ padding (`Union[bool, str, PaddingStrategy]`, *optional*, defaults to `False`):
155
+ Padding strategy applied to the input ids. See [`PreTrainedTokenizerFast.pad`] for more information.
156
+ truncation (`Union[bool, str, TruncationStrategy]`, *optional*):
157
+ Truncation strategy applied to the input ids. See [`PreTrainedTokenizerFast.truncate`] for more information.
158
+ max_length (`int`, *optional*):
159
+ Maximum length of the returned list and optionally padding/truncation length. See
160
+ [`PreTrainedTokenizerFast.__call__`] for more information.
161
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
162
+ Whether the input text is split into words or not. If set to `True`, the tokenizer will skip the
163
+ tokenization process and assume the input is already tokenized.
164
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
165
+ Whether to add special tokens or not. See [`PreTrainedTokenizerFast.__call__`] for more information.
166
+ return_tensors (`Union[str, TensorType]`, *optional*):
167
+ If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
168
+ information.
169
+ """
170
+ image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
171
+
172
+ n_images_in_text = []
173
+ inputs = BatchFeature()
174
+
175
+ if text is not None:
176
+ if isinstance(text, str):
177
+ text = [text]
178
+ elif not isinstance(text, list) and not isinstance(text[0], str):
179
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
180
+
181
+ # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
182
+ fake_image_token = self.fake_image_token.content
183
+ image_token = self.image_token.content
184
+ image_str = f"{fake_image_token}{image_token * image_seq_len}{fake_image_token}"
185
+
186
+ if self.image_processor.do_image_splitting:
187
+ # A single image token is split into 4 patches + 1 original image
188
+ image_str = image_str * 5
189
+
190
+ prompt_strings = []
191
+ for sample in text:
192
+ n_images_in_text.append(sample.count(image_token))
193
+ sample = sample.replace(image_token, image_str)
194
+ # Remove any double fake tokens if images are adjacent
195
+ sample = sample.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}")
196
+ prompt_strings.append(sample)
197
+
198
+ text_inputs = self.tokenizer(
199
+ text=prompt_strings,
200
+ add_special_tokens=add_special_tokens,
201
+ padding=padding,
202
+ truncation=truncation,
203
+ max_length=max_length,
204
+ is_split_into_words=is_split_into_words,
205
+ return_tensors=return_tensors,
206
+ )
207
+ inputs.update(text_inputs)
208
+
209
+ if images is not None:
210
+ if is_image_or_image_url(images):
211
+ images = [[images]]
212
+ elif isinstance(images, list) and is_image_or_image_url(images[0]):
213
+ images = [images]
214
+ elif (
215
+ not isinstance(images, list)
216
+ and not isinstance(images[0], list)
217
+ and not is_image_or_image_url(images[0][0])
218
+ ):
219
+ raise ValueError(
220
+ "Invalid input images. Please provide a single image or a list of images or a list of list of images."
221
+ )
222
+
223
+ n_images_in_images = [len(sample) for sample in images]
224
+ if text is not None and not n_images_in_images == n_images_in_text:
225
+ raise ValueError(
226
+ f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
227
+ )
228
+
229
+ # Load images if they are URLs
230
+ images = [[load_image(im) for im in sample] for sample in images]
231
+ image_inputs = self.image_processor(images, return_tensors=return_tensors)
232
+ inputs.update(image_inputs)
233
+
234
+ return inputs
235
+
236
+ def batch_decode(self, *args, **kwargs):
237
+ """
238
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
239
+ refer to the docstring of this method for more information.
240
+ """
241
+ return self.tokenizer.batch_decode(*args, **kwargs)
242
+
243
+ def decode(self, *args, **kwargs):
244
+ """
245
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
246
+ the docstring of this method for more information.
247
+ """
248
+ return self.tokenizer.decode(*args, **kwargs)
249
+
250
+ @property
251
+ def model_input_names(self):
252
+ tokenizer_input_names = self.tokenizer.model_input_names
253
+ image_processor_input_names = self.image_processor.model_input_names
254
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
255
+
256
+ def apply_chat_template(
257
+ self,
258
+ conversation: Union[List[Dict[str, str]], "Conversation"],
259
+ chat_template: Optional[str] = None,
260
+ tokenize: bool = False,
261
+ **kwargs,
262
+ ) -> str:
263
+ """
264
+ Overrides the tokenizer's `apply_chat_template` method to apply the IDEFICS2 chat template by default
265
+ if no chat template is provided.
266
+
267
+ By default, the output isn't tokenized. This is because the IDEFICS2 chat template is designed to insert
268
+ the image token <image> into the sequence according to the message, but does not handle expanding the image
269
+ tokens to the sequence length or adding the surrounding tokens, e.g. `<fake_token_around_image>`.
270
+
271
+ Args:
272
+ conversation (`Union[List[Dict[str, str]], "Conversation"]`):
273
+ The conversation to format.
274
+ chat_template (`Optional[str]`, *optional*):
275
+ The Jinja template to use for formatting the conversation. If not provided, the default chat template
276
+ is used.
277
+ tokenize (`bool`, *optional*, defaults to `False`):
278
+ Whether to tokenize the output or not.
279
+ **kwargs:
280
+ Additional keyword arguments for the tokenizer's `apply_chat_template` method.
281
+ """
282
+
283
+ if chat_template is None:
284
+ if self.chat_template is not None:
285
+ chat_template = self.chat_template
286
+ else:
287
+ chat_template = self.default_chat_template
288
+
289
+ return self.tokenizer.apply_chat_template(
290
+ conversation, chat_template=chat_template, tokenize=tokenize, **kwargs
291
+ )
292
+
293
+ @property
294
+ def default_chat_template(self):
295
+ """
296
+ This template formats inputs in the form of a chat history. For each message in the chat history:
297
+ * the template will output the role of the speaker followed by the content of the message.
298
+ * content can be a single string or a list of strings and images.
299
+ * If the content element is an image, the template will output a single <image> token for it; the expansion to the full image-token sequence (with the surrounding <fake_token_around_image> tokens) is handled later by `__call__`
300
+ * The template will output an <end_of_utterance> token at the end of each message.
301
+
302
+ Example:
303
+
304
+ ```python
305
+ messages = [{
306
+ "role": "user",
307
+ "content": [
308
+ {"type": "text", "text": "What’s in this image?"},
309
+ {"type": "image"},
310
+ {"type": "image"},
311
+ ],
312
+ },
313
+ {
314
+ "role": "assistant",
315
+ "content": [{"type": "text", "text": "This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground."},]
316
+ }]
317
+ ```
318
+
319
+ Will create outputs like:
320
+ ```
321
+ User: What’s in this image?<image><image><end_of_utterance>
322
+ Assistant: This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground.<end_of_utterance>
323
+ ```
324
+ """
325
+ # fmt: off
326
+ return (
327
+ "{% for message in messages %}"
328
+ "{{message['role'].capitalize()}}"
329
+ "{% if message['content'][0]['type'] == 'image' %}"
330
+ "{{':'}}"
331
+ "{% else %}"
332
+ "{{': '}}"
333
+ "{% endif %}"
334
+ "{% for line in message['content'] %}"
335
+ "{% if line['type'] == 'text' %}"
336
+ "{{line['text']}}"
337
+ "{% elif line['type'] == 'image' %}"
338
+ "{{ '<image>' }}"
339
+ "{% endif %}"
340
+ "{% endfor %}"
341
+ "<end_of_utterance>\n"
342
+ "{% endfor %}"
343
+
344
+ "{% if add_generation_prompt %}"
345
+ "{{ 'Assistant:' }}"
346
+ "{% endif %}"
347
+ )
348
+ # fmt: on
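For reference, a minimal usage sketch of the processor defined above. This is illustrative only and assumes the `HuggingFaceM4/idefics2-8b` checkpoint and the Britannica image URL from the docstring example are reachable; it is not part of the diffed file.

```python
from transformers import Idefics2Processor
from transformers.image_utils import load_image

processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What's in this image?"},
        ],
    }
]
# apply_chat_template only inserts a single <image> placeholder per image in the prompt string.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

image = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")

# __call__ expands each <image> to <fake_token_around_image> + <image> * image_seq_len + <fake_token_around_image>
# and runs the image processor on the provided image(s).
inputs = processor(text=prompt, images=[[image]], return_tensors="pt")
print(inputs.input_ids.shape, inputs.pixel_values.shape)
```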
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__init__.py ADDED
@@ -0,0 +1,120 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMOnnxConfig"],
28
+ "tokenization_layoutlm": ["LayoutLMTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_layoutlm_fast"] = ["LayoutLMTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_layoutlm"] = [
46
+ "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "LayoutLMForMaskedLM",
48
+ "LayoutLMForSequenceClassification",
49
+ "LayoutLMForTokenClassification",
50
+ "LayoutLMForQuestionAnswering",
51
+ "LayoutLMModel",
52
+ "LayoutLMPreTrainedModel",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_layoutlm"] = [
62
+ "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFLayoutLMForMaskedLM",
64
+ "TFLayoutLMForSequenceClassification",
65
+ "TFLayoutLMForTokenClassification",
66
+ "TFLayoutLMForQuestionAnswering",
67
+ "TFLayoutLMMainLayer",
68
+ "TFLayoutLMModel",
69
+ "TFLayoutLMPreTrainedModel",
70
+ ]
71
+
72
+
73
+ if TYPE_CHECKING:
74
+ from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMOnnxConfig
75
+ from .tokenization_layoutlm import LayoutLMTokenizer
76
+
77
+ try:
78
+ if not is_tokenizers_available():
79
+ raise OptionalDependencyNotAvailable()
80
+ except OptionalDependencyNotAvailable:
81
+ pass
82
+ else:
83
+ from .tokenization_layoutlm_fast import LayoutLMTokenizerFast
84
+
85
+ try:
86
+ if not is_torch_available():
87
+ raise OptionalDependencyNotAvailable()
88
+ except OptionalDependencyNotAvailable:
89
+ pass
90
+ else:
91
+ from .modeling_layoutlm import (
92
+ LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
93
+ LayoutLMForMaskedLM,
94
+ LayoutLMForQuestionAnswering,
95
+ LayoutLMForSequenceClassification,
96
+ LayoutLMForTokenClassification,
97
+ LayoutLMModel,
98
+ LayoutLMPreTrainedModel,
99
+ )
100
+ try:
101
+ if not is_tf_available():
102
+ raise OptionalDependencyNotAvailable()
103
+ except OptionalDependencyNotAvailable:
104
+ pass
105
+ else:
106
+ from .modeling_tf_layoutlm import (
107
+ TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
108
+ TFLayoutLMForMaskedLM,
109
+ TFLayoutLMForQuestionAnswering,
110
+ TFLayoutLMForSequenceClassification,
111
+ TFLayoutLMForTokenClassification,
112
+ TFLayoutLMMainLayer,
113
+ TFLayoutLMModel,
114
+ TFLayoutLMPreTrainedModel,
115
+ )
116
+
117
+ else:
118
+ import sys
119
+
120
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
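As a quick, hedged illustration of the lazy/optional-dependency structure above: the guarded entries only become importable when the corresponding backend is installed (a sketch, assuming `transformers` itself is importable).

```python
from transformers.utils import is_tokenizers_available, is_torch_available

# Always present: registered in the base _import_structure.
from transformers import LayoutLMConfig, LayoutLMTokenizer

if is_tokenizers_available():
    # Only registered when the `tokenizers` backend is installed.
    from transformers import LayoutLMTokenizerFast

if is_torch_available():
    # Only registered when PyTorch is installed.
    from transformers import LayoutLMForTokenClassification, LayoutLMModel
```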
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.95 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/modeling_layoutlm.cpython-310.pyc ADDED
Binary file (41.5 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/modeling_tf_layoutlm.cpython-310.pyc ADDED
Binary file (50.2 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/tokenization_layoutlm.cpython-310.pyc ADDED
Binary file (17.1 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/__pycache__/tokenization_layoutlm_fast.cpython-310.pyc ADDED
Binary file (6.82 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/configuration_layoutlm.py ADDED
@@ -0,0 +1,198 @@
1
+ # coding=utf-8
2
+ # Copyright 2010, The Microsoft Research Asia LayoutLM Team authors
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LayoutLM model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PretrainedConfig, PreTrainedTokenizer
20
+ from ...onnx import OnnxConfig, PatchingSpec
21
+ from ...utils import TensorType, is_torch_available, logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class LayoutLMConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
33
+ LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
34
+ with the defaults will yield a similar configuration to that of the LayoutLM
35
+ [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) architecture.
36
+
37
+ Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`BertConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 30522):
43
+ Vocabulary size of the LayoutLM model. Defines the number of different tokens that can be represented by the
44
+ *inputs_ids* passed to the forward method of [`LayoutLMModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 512):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed into [`LayoutLMModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ pad_token_id (`int`, *optional*, defaults to 0):
70
+ The value used to pad input_ids.
71
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
72
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
73
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
74
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
75
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
76
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
77
+ use_cache (`bool`, *optional*, defaults to `True`):
78
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
79
+ relevant if `config.is_decoder=True`.
80
+ max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
81
+ The maximum value that the 2D position embedding might ever used. Typically set this to something large
82
+ just in case (e.g., 1024).
83
+
84
+ Examples:
85
+
86
+ ```python
87
+ >>> from transformers import LayoutLMConfig, LayoutLMModel
88
+
89
+ >>> # Initializing a LayoutLM configuration
90
+ >>> configuration = LayoutLMConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the configuration
93
+ >>> model = LayoutLMModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "layoutlm"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=30522,
104
+ hidden_size=768,
105
+ num_hidden_layers=12,
106
+ num_attention_heads=12,
107
+ intermediate_size=3072,
108
+ hidden_act="gelu",
109
+ hidden_dropout_prob=0.1,
110
+ attention_probs_dropout_prob=0.1,
111
+ max_position_embeddings=512,
112
+ type_vocab_size=2,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ pad_token_id=0,
116
+ position_embedding_type="absolute",
117
+ use_cache=True,
118
+ max_2d_position_embeddings=1024,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
122
+ self.vocab_size = vocab_size
123
+ self.hidden_size = hidden_size
124
+ self.num_hidden_layers = num_hidden_layers
125
+ self.num_attention_heads = num_attention_heads
126
+ self.hidden_act = hidden_act
127
+ self.intermediate_size = intermediate_size
128
+ self.hidden_dropout_prob = hidden_dropout_prob
129
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
130
+ self.max_position_embeddings = max_position_embeddings
131
+ self.type_vocab_size = type_vocab_size
132
+ self.initializer_range = initializer_range
133
+ self.layer_norm_eps = layer_norm_eps
134
+ self.position_embedding_type = position_embedding_type
135
+ self.use_cache = use_cache
136
+ self.max_2d_position_embeddings = max_2d_position_embeddings
137
+
138
+
139
+ class LayoutLMOnnxConfig(OnnxConfig):
140
+ def __init__(
141
+ self,
142
+ config: PretrainedConfig,
143
+ task: str = "default",
144
+ patching_specs: List[PatchingSpec] = None,
145
+ ):
146
+ super().__init__(config, task=task, patching_specs=patching_specs)
147
+ self.max_2d_positions = config.max_2d_position_embeddings - 1
148
+
149
+ @property
150
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
151
+ return OrderedDict(
152
+ [
153
+ ("input_ids", {0: "batch", 1: "sequence"}),
154
+ ("bbox", {0: "batch", 1: "sequence"}),
155
+ ("attention_mask", {0: "batch", 1: "sequence"}),
156
+ ("token_type_ids", {0: "batch", 1: "sequence"}),
157
+ ]
158
+ )
159
+
160
+ def generate_dummy_inputs(
161
+ self,
162
+ tokenizer: PreTrainedTokenizer,
163
+ batch_size: int = -1,
164
+ seq_length: int = -1,
165
+ is_pair: bool = False,
166
+ framework: Optional[TensorType] = None,
167
+ ) -> Mapping[str, Any]:
168
+ """
169
+ Generate inputs to provide to the ONNX exporter for the specific framework
170
+
171
+ Args:
172
+ tokenizer: The tokenizer associated with this model configuration
173
+ batch_size: The batch size (int) to export the model for (-1 means dynamic axis)
174
+ seq_length: The sequence length (int) to export the model for (-1 means dynamic axis)
175
+ is_pair: Indicate if the input is a pair (sentence 1, sentence 2)
176
+ framework: The framework (optional) the tokenizer will generate tensors for
177
+
178
+ Returns:
179
+ Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
180
+ """
181
+
182
+ input_dict = super().generate_dummy_inputs(
183
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
184
+ )
185
+
186
+ # Generate a dummy bbox
187
+ box = [48, 84, 73, 128]
188
+
189
+ if not framework == TensorType.PYTORCH:
190
+ raise NotImplementedError("Exporting LayoutLM to ONNX is currently only supported for PyTorch.")
191
+
192
+ if not is_torch_available():
193
+ raise ValueError("Cannot generate dummy inputs without PyTorch installed.")
194
+ import torch
195
+
196
+ batch_size, seq_length = input_dict["input_ids"].shape
197
+ input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1)
198
+ return input_dict
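A brief sketch of driving the dummy-input path defined above, under the assumption that PyTorch and the `microsoft/layoutlm-base-uncased` tokenizer are available locally.

```python
from transformers import AutoTokenizer, LayoutLMConfig
from transformers.models.layoutlm import LayoutLMOnnxConfig
from transformers.utils import TensorType

config = LayoutLMConfig()
onnx_config = LayoutLMOnnxConfig(config)

tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")

# generate_dummy_inputs builds the text inputs via the tokenizer, then tiles a fixed
# dummy bounding box to shape (batch, sequence, 4); only the PyTorch path is supported.
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print({name: tuple(tensor.shape) for name, tensor in dummy.items()})
```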
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlm/modeling_layoutlm.py ADDED
@@ -0,0 +1,1368 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LayoutLM model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ MaskedLMOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
37
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
38
+ from .configuration_layoutlm import LayoutLMConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CONFIG_FOR_DOC = "LayoutLMConfig"
44
+ _CHECKPOINT_FOR_DOC = "microsoft/layoutlm-base-uncased"
45
+
46
+
47
+ from ..deprecated._archive_maps import LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ LayoutLMLayerNorm = nn.LayerNorm
51
+
52
+
53
+ class LayoutLMEmbeddings(nn.Module):
54
+ """Construct the embeddings from word, position and token_type embeddings."""
55
+
56
+ def __init__(self, config):
57
+ super(LayoutLMEmbeddings, self).__init__()
58
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
59
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
60
+ self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
61
+ self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
62
+ self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
63
+ self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
64
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
65
+
66
+ self.LayerNorm = LayoutLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
67
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
68
+
69
+ self.register_buffer(
70
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
71
+ )
72
+
73
+ def forward(
74
+ self,
75
+ input_ids=None,
76
+ bbox=None,
77
+ token_type_ids=None,
78
+ position_ids=None,
79
+ inputs_embeds=None,
80
+ ):
81
+ if input_ids is not None:
82
+ input_shape = input_ids.size()
83
+ else:
84
+ input_shape = inputs_embeds.size()[:-1]
85
+
86
+ seq_length = input_shape[1]
87
+
88
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
89
+
90
+ if position_ids is None:
91
+ position_ids = self.position_ids[:, :seq_length]
92
+
93
+ if token_type_ids is None:
94
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
95
+
96
+ if inputs_embeds is None:
97
+ inputs_embeds = self.word_embeddings(input_ids)
98
+
99
+ words_embeddings = inputs_embeds
100
+ position_embeddings = self.position_embeddings(position_ids)
101
+ try:
102
+ left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
103
+ upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
104
+ right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
105
+ lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
106
+ except IndexError as e:
107
+ raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
108
+
109
+ h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
110
+ w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
111
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
112
+
113
+ embeddings = (
114
+ words_embeddings
115
+ + position_embeddings
116
+ + left_position_embeddings
117
+ + upper_position_embeddings
118
+ + right_position_embeddings
119
+ + lower_position_embeddings
120
+ + h_position_embeddings
121
+ + w_position_embeddings
122
+ + token_type_embeddings
123
+ )
124
+ embeddings = self.LayerNorm(embeddings)
125
+ embeddings = self.dropout(embeddings)
126
+ return embeddings
127
+
128
+
129
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM
130
+ class LayoutLMSelfAttention(nn.Module):
131
+ def __init__(self, config, position_embedding_type=None):
132
+ super().__init__()
133
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
134
+ raise ValueError(
135
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
136
+ f"heads ({config.num_attention_heads})"
137
+ )
138
+
139
+ self.num_attention_heads = config.num_attention_heads
140
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
141
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
142
+
143
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
144
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
145
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
146
+
147
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
148
+ self.position_embedding_type = position_embedding_type or getattr(
149
+ config, "position_embedding_type", "absolute"
150
+ )
151
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
152
+ self.max_position_embeddings = config.max_position_embeddings
153
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
154
+
155
+ self.is_decoder = config.is_decoder
156
+
157
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
158
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
159
+ x = x.view(new_x_shape)
160
+ return x.permute(0, 2, 1, 3)
161
+
162
+ def forward(
163
+ self,
164
+ hidden_states: torch.Tensor,
165
+ attention_mask: Optional[torch.FloatTensor] = None,
166
+ head_mask: Optional[torch.FloatTensor] = None,
167
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
168
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
169
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
170
+ output_attentions: Optional[bool] = False,
171
+ ) -> Tuple[torch.Tensor]:
172
+ mixed_query_layer = self.query(hidden_states)
173
+
174
+ # If this is instantiated as a cross-attention module, the keys
175
+ # and values come from an encoder; the attention mask needs to be
176
+ # such that the encoder's padding tokens are not attended to.
177
+ is_cross_attention = encoder_hidden_states is not None
178
+
179
+ if is_cross_attention and past_key_value is not None:
180
+ # reuse k,v, cross_attentions
181
+ key_layer = past_key_value[0]
182
+ value_layer = past_key_value[1]
183
+ attention_mask = encoder_attention_mask
184
+ elif is_cross_attention:
185
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
186
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
187
+ attention_mask = encoder_attention_mask
188
+ elif past_key_value is not None:
189
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
190
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
191
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
192
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
193
+ else:
194
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
195
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
196
+
197
+ query_layer = self.transpose_for_scores(mixed_query_layer)
198
+
199
+ use_cache = past_key_value is not None
200
+ if self.is_decoder:
201
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
202
+ # Further calls to cross_attention layer can then reuse all cross-attention
203
+ # key/value_states (first "if" case)
204
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
205
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
206
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
207
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
208
+ past_key_value = (key_layer, value_layer)
209
+
210
+ # Take the dot product between "query" and "key" to get the raw attention scores.
211
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
212
+
213
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
214
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
215
+ if use_cache:
216
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
217
+ -1, 1
218
+ )
219
+ else:
220
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
221
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
222
+ distance = position_ids_l - position_ids_r
223
+
224
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
225
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
226
+
227
+ if self.position_embedding_type == "relative_key":
228
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
229
+ attention_scores = attention_scores + relative_position_scores
230
+ elif self.position_embedding_type == "relative_key_query":
231
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
232
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
233
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
234
+
235
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
236
+ if attention_mask is not None:
237
+ # Apply the attention mask is (precomputed for all layers in LayoutLMModel forward() function)
238
+ attention_scores = attention_scores + attention_mask
239
+
240
+ # Normalize the attention scores to probabilities.
241
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
242
+
243
+ # This is actually dropping out entire tokens to attend to, which might
244
+ # seem a bit unusual, but is taken from the original Transformer paper.
245
+ attention_probs = self.dropout(attention_probs)
246
+
247
+ # Mask heads if we want to
248
+ if head_mask is not None:
249
+ attention_probs = attention_probs * head_mask
250
+
251
+ context_layer = torch.matmul(attention_probs, value_layer)
252
+
253
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
254
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
255
+ context_layer = context_layer.view(new_context_layer_shape)
256
+
257
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
258
+
259
+ if self.is_decoder:
260
+ outputs = outputs + (past_key_value,)
261
+ return outputs
262
+
263
+
264
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM
265
+ class LayoutLMSelfOutput(nn.Module):
266
+ def __init__(self, config):
267
+ super().__init__()
268
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
269
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
270
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
271
+
272
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
273
+ hidden_states = self.dense(hidden_states)
274
+ hidden_states = self.dropout(hidden_states)
275
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
276
+ return hidden_states
277
+
278
+
279
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM
280
+ class LayoutLMAttention(nn.Module):
281
+ def __init__(self, config, position_embedding_type=None):
282
+ super().__init__()
283
+ self.self = LayoutLMSelfAttention(config, position_embedding_type=position_embedding_type)
284
+ self.output = LayoutLMSelfOutput(config)
285
+ self.pruned_heads = set()
286
+
287
+ def prune_heads(self, heads):
288
+ if len(heads) == 0:
289
+ return
290
+ heads, index = find_pruneable_heads_and_indices(
291
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
292
+ )
293
+
294
+ # Prune linear layers
295
+ self.self.query = prune_linear_layer(self.self.query, index)
296
+ self.self.key = prune_linear_layer(self.self.key, index)
297
+ self.self.value = prune_linear_layer(self.self.value, index)
298
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
299
+
300
+ # Update hyper params and store pruned heads
301
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
302
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
303
+ self.pruned_heads = self.pruned_heads.union(heads)
304
+
305
+ def forward(
306
+ self,
307
+ hidden_states: torch.Tensor,
308
+ attention_mask: Optional[torch.FloatTensor] = None,
309
+ head_mask: Optional[torch.FloatTensor] = None,
310
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
311
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
312
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
313
+ output_attentions: Optional[bool] = False,
314
+ ) -> Tuple[torch.Tensor]:
315
+ self_outputs = self.self(
316
+ hidden_states,
317
+ attention_mask,
318
+ head_mask,
319
+ encoder_hidden_states,
320
+ encoder_attention_mask,
321
+ past_key_value,
322
+ output_attentions,
323
+ )
324
+ attention_output = self.output(self_outputs[0], hidden_states)
325
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
326
+ return outputs
327
+
328
+
329
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
330
+ class LayoutLMIntermediate(nn.Module):
331
+ def __init__(self, config):
332
+ super().__init__()
333
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
334
+ if isinstance(config.hidden_act, str):
335
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
336
+ else:
337
+ self.intermediate_act_fn = config.hidden_act
338
+
339
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
340
+ hidden_states = self.dense(hidden_states)
341
+ hidden_states = self.intermediate_act_fn(hidden_states)
342
+ return hidden_states
343
+
344
+
345
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
346
+ class LayoutLMOutput(nn.Module):
347
+ def __init__(self, config):
348
+ super().__init__()
349
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
350
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
351
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
352
+
353
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
354
+ hidden_states = self.dense(hidden_states)
355
+ hidden_states = self.dropout(hidden_states)
356
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
357
+ return hidden_states
358
+
359
+
360
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM
361
+ class LayoutLMLayer(nn.Module):
362
+ def __init__(self, config):
363
+ super().__init__()
364
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
365
+ self.seq_len_dim = 1
366
+ self.attention = LayoutLMAttention(config)
367
+ self.is_decoder = config.is_decoder
368
+ self.add_cross_attention = config.add_cross_attention
369
+ if self.add_cross_attention:
370
+ if not self.is_decoder:
371
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
372
+ self.crossattention = LayoutLMAttention(config, position_embedding_type="absolute")
373
+ self.intermediate = LayoutLMIntermediate(config)
374
+ self.output = LayoutLMOutput(config)
375
+
376
+ def forward(
377
+ self,
378
+ hidden_states: torch.Tensor,
379
+ attention_mask: Optional[torch.FloatTensor] = None,
380
+ head_mask: Optional[torch.FloatTensor] = None,
381
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
382
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
383
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
384
+ output_attentions: Optional[bool] = False,
385
+ ) -> Tuple[torch.Tensor]:
386
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
387
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
388
+ self_attention_outputs = self.attention(
389
+ hidden_states,
390
+ attention_mask,
391
+ head_mask,
392
+ output_attentions=output_attentions,
393
+ past_key_value=self_attn_past_key_value,
394
+ )
395
+ attention_output = self_attention_outputs[0]
396
+
397
+ # if decoder, the last output is tuple of self-attn cache
398
+ if self.is_decoder:
399
+ outputs = self_attention_outputs[1:-1]
400
+ present_key_value = self_attention_outputs[-1]
401
+ else:
402
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
403
+
404
+ cross_attn_present_key_value = None
405
+ if self.is_decoder and encoder_hidden_states is not None:
406
+ if not hasattr(self, "crossattention"):
407
+ raise ValueError(
408
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
409
+ " by setting `config.add_cross_attention=True`"
410
+ )
411
+
412
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
413
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
414
+ cross_attention_outputs = self.crossattention(
415
+ attention_output,
416
+ attention_mask,
417
+ head_mask,
418
+ encoder_hidden_states,
419
+ encoder_attention_mask,
420
+ cross_attn_past_key_value,
421
+ output_attentions,
422
+ )
423
+ attention_output = cross_attention_outputs[0]
424
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
425
+
426
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
427
+ cross_attn_present_key_value = cross_attention_outputs[-1]
428
+ present_key_value = present_key_value + cross_attn_present_key_value
429
+
430
+ layer_output = apply_chunking_to_forward(
431
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
432
+ )
433
+ outputs = (layer_output,) + outputs
434
+
435
+ # if decoder, return the attn key/values as the last output
436
+ if self.is_decoder:
437
+ outputs = outputs + (present_key_value,)
438
+
439
+ return outputs
440
+
441
+ def feed_forward_chunk(self, attention_output):
442
+ intermediate_output = self.intermediate(attention_output)
443
+ layer_output = self.output(intermediate_output, attention_output)
444
+ return layer_output
445
+
446
+
447
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM
448
+ class LayoutLMEncoder(nn.Module):
449
+ def __init__(self, config):
450
+ super().__init__()
451
+ self.config = config
452
+ self.layer = nn.ModuleList([LayoutLMLayer(config) for _ in range(config.num_hidden_layers)])
453
+ self.gradient_checkpointing = False
454
+
455
+ def forward(
456
+ self,
457
+ hidden_states: torch.Tensor,
458
+ attention_mask: Optional[torch.FloatTensor] = None,
459
+ head_mask: Optional[torch.FloatTensor] = None,
460
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
461
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
462
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
463
+ use_cache: Optional[bool] = None,
464
+ output_attentions: Optional[bool] = False,
465
+ output_hidden_states: Optional[bool] = False,
466
+ return_dict: Optional[bool] = True,
467
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
468
+ all_hidden_states = () if output_hidden_states else None
469
+ all_self_attentions = () if output_attentions else None
470
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
471
+
472
+ if self.gradient_checkpointing and self.training:
473
+ if use_cache:
474
+ logger.warning_once(
475
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
476
+ )
477
+ use_cache = False
478
+
479
+ next_decoder_cache = () if use_cache else None
480
+ for i, layer_module in enumerate(self.layer):
481
+ if output_hidden_states:
482
+ all_hidden_states = all_hidden_states + (hidden_states,)
483
+
484
+ layer_head_mask = head_mask[i] if head_mask is not None else None
485
+ past_key_value = past_key_values[i] if past_key_values is not None else None
486
+
487
+ if self.gradient_checkpointing and self.training:
488
+ layer_outputs = self._gradient_checkpointing_func(
489
+ layer_module.__call__,
490
+ hidden_states,
491
+ attention_mask,
492
+ layer_head_mask,
493
+ encoder_hidden_states,
494
+ encoder_attention_mask,
495
+ past_key_value,
496
+ output_attentions,
497
+ )
498
+ else:
499
+ layer_outputs = layer_module(
500
+ hidden_states,
501
+ attention_mask,
502
+ layer_head_mask,
503
+ encoder_hidden_states,
504
+ encoder_attention_mask,
505
+ past_key_value,
506
+ output_attentions,
507
+ )
508
+
509
+ hidden_states = layer_outputs[0]
510
+ if use_cache:
511
+ next_decoder_cache += (layer_outputs[-1],)
512
+ if output_attentions:
513
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
514
+ if self.config.add_cross_attention:
515
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
516
+
517
+ if output_hidden_states:
518
+ all_hidden_states = all_hidden_states + (hidden_states,)
519
+
520
+ if not return_dict:
521
+ return tuple(
522
+ v
523
+ for v in [
524
+ hidden_states,
525
+ next_decoder_cache,
526
+ all_hidden_states,
527
+ all_self_attentions,
528
+ all_cross_attentions,
529
+ ]
530
+ if v is not None
531
+ )
532
+ return BaseModelOutputWithPastAndCrossAttentions(
533
+ last_hidden_state=hidden_states,
534
+ past_key_values=next_decoder_cache,
535
+ hidden_states=all_hidden_states,
536
+ attentions=all_self_attentions,
537
+ cross_attentions=all_cross_attentions,
538
+ )
539
+
540
+
541
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
542
+ class LayoutLMPooler(nn.Module):
543
+ def __init__(self, config):
544
+ super().__init__()
545
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
546
+ self.activation = nn.Tanh()
547
+
548
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
549
+ # We "pool" the model by simply taking the hidden state corresponding
550
+ # to the first token.
551
+ first_token_tensor = hidden_states[:, 0]
552
+ pooled_output = self.dense(first_token_tensor)
553
+ pooled_output = self.activation(pooled_output)
554
+ return pooled_output
555
+
556
+
557
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM
558
+ class LayoutLMPredictionHeadTransform(nn.Module):
559
+ def __init__(self, config):
560
+ super().__init__()
561
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
562
+ if isinstance(config.hidden_act, str):
563
+ self.transform_act_fn = ACT2FN[config.hidden_act]
564
+ else:
565
+ self.transform_act_fn = config.hidden_act
566
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
567
+
568
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
569
+ hidden_states = self.dense(hidden_states)
570
+ hidden_states = self.transform_act_fn(hidden_states)
571
+ hidden_states = self.LayerNorm(hidden_states)
572
+ return hidden_states
573
+
574
+
575
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM
576
+ class LayoutLMLMPredictionHead(nn.Module):
577
+ def __init__(self, config):
578
+ super().__init__()
579
+ self.transform = LayoutLMPredictionHeadTransform(config)
580
+
581
+ # The output weights are the same as the input embeddings, but there is
582
+ # an output-only bias for each token.
583
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
584
+
585
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
586
+
587
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
588
+ self.decoder.bias = self.bias
589
+
590
+ def forward(self, hidden_states):
591
+ hidden_states = self.transform(hidden_states)
592
+ hidden_states = self.decoder(hidden_states)
593
+ return hidden_states
594
+
595
+
596
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM
597
+ class LayoutLMOnlyMLMHead(nn.Module):
598
+ def __init__(self, config):
599
+ super().__init__()
600
+ self.predictions = LayoutLMLMPredictionHead(config)
601
+
602
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
603
+ prediction_scores = self.predictions(sequence_output)
604
+ return prediction_scores
605
+
606
+
607
+ class LayoutLMPreTrainedModel(PreTrainedModel):
608
+ """
609
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
610
+ models.
611
+ """
612
+
613
+ config_class = LayoutLMConfig
614
+ base_model_prefix = "layoutlm"
615
+ supports_gradient_checkpointing = True
616
+
617
+ def _init_weights(self, module):
618
+ """Initialize the weights"""
619
+ if isinstance(module, nn.Linear):
620
+ # Slightly different from the TF version which uses truncated_normal for initialization
621
+ # cf https://github.com/pytorch/pytorch/pull/5617
622
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
623
+ if module.bias is not None:
624
+ module.bias.data.zero_()
625
+ elif isinstance(module, nn.Embedding):
626
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
627
+ if module.padding_idx is not None:
628
+ module.weight.data[module.padding_idx].zero_()
629
+ elif isinstance(module, LayoutLMLayerNorm):
630
+ module.bias.data.zero_()
631
+ module.weight.data.fill_(1.0)
632
+
633
+
634
+ LAYOUTLM_START_DOCSTRING = r"""
635
+ The LayoutLM model was proposed in [LayoutLM: Pre-training of Text and Layout for Document Image
636
+ Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei and
637
+ Ming Zhou.
638
+
639
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
640
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
641
+ behavior.
642
+
643
+ Parameters:
644
+ config ([`LayoutLMConfig`]): Model configuration class with all the parameters of the model.
645
+ Initializing with a config file does not load the weights associated with the model, only the
646
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
647
+ """
648
+
649
+ LAYOUTLM_INPUTS_DOCSTRING = r"""
650
+ Args:
651
+ input_ids (`torch.LongTensor` of shape `({0})`):
652
+ Indices of input sequence tokens in the vocabulary.
653
+
654
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
655
+ [`PreTrainedTokenizer.__call__`] for details.
656
+
657
+ [What are input IDs?](../glossary#input-ids)
658
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
659
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
660
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
661
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
662
+ y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
663
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
664
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: `1` for
665
+ tokens that are NOT MASKED, `0` for MASKED tokens.
666
+
667
+ [What are attention masks?](../glossary#attention-mask)
668
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
669
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
670
+ 1]`: `0` corresponds to a *sentence A* token, `1` corresponds to a *sentence B* token
671
+
672
+ [What are token type IDs?](../glossary#token-type-ids)
673
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
674
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
675
+ config.max_position_embeddings - 1]`.
676
+
677
+ [What are position IDs?](../glossary#position-ids)
678
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
679
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: `1`
680
+ indicates the head is **not masked**, `0` indicates the head is **masked**.
681
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
682
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
683
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
684
+ model's internal embedding lookup matrix.
685
+ output_attentions (`bool`, *optional*):
686
+ If set to `True`, the attention tensors of all attention layers are returned. See `attentions` under
687
+ returned tensors for more detail.
688
+ output_hidden_states (`bool`, *optional*):
689
+ If set to `True`, the hidden states of all layers are returned. See `hidden_states` under returned tensors
690
+ for more detail.
691
+ return_dict (`bool`, *optional*):
692
+ If set to `True`, the model will return a [`~utils.ModelOutput`] instead of a plain tuple.
693
+ """
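The `bbox` argument documented above expects boxes on a 0-1000 scale. A minimal sketch of one common way to get there, assuming pixel-space `(x0, y0, x1, y1)` boxes and a known page size; the helper name and the sample numbers are illustrative, not from this file.

```python
def normalize_box(box, page_width, page_height):
    """Scale a pixel-space (x0, y0, x1, y1) box to the 0-1000 range LayoutLM expects."""
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]

# Example: a word box on a 762 x 1000 pixel page (illustrative values).
print(normalize_box([48, 84, 156, 98], page_width=762, page_height=1000))
```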
694
+
695
+
696
+ @add_start_docstrings(
697
+ "The bare LayoutLM Model transformer outputting raw hidden-states without any specific head on top.",
698
+ LAYOUTLM_START_DOCSTRING,
699
+ )
700
+ class LayoutLMModel(LayoutLMPreTrainedModel):
701
+ def __init__(self, config):
702
+ super().__init__(config)
703
+ self.config = config
704
+
705
+ self.embeddings = LayoutLMEmbeddings(config)
706
+ self.encoder = LayoutLMEncoder(config)
707
+ self.pooler = LayoutLMPooler(config)
708
+
709
+ # Initialize weights and apply final processing
710
+ self.post_init()
711
+
712
+ def get_input_embeddings(self):
713
+ return self.embeddings.word_embeddings
714
+
715
+ def set_input_embeddings(self, value):
716
+ self.embeddings.word_embeddings = value
717
+
718
+ def _prune_heads(self, heads_to_prune):
719
+ """
720
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class PreTrainedModel.
722
+ """
723
+ for layer, heads in heads_to_prune.items():
724
+ self.encoder.layer[layer].attention.prune_heads(heads)
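A short usage sketch for the head-pruning hook above; `prune_heads` on the base `PreTrainedModel` class is the public entry point and forwards the same `{layer_num: [head, ...]}` dict to `_prune_heads`.

```python
from transformers import LayoutLMModel

model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

# Remove attention heads 0 and 1 of layer 0 and head 2 of layer 11.
model.prune_heads({0: [0, 1], 11: [2]})

# The pruned heads are recorded on the config.
print(model.config.pruned_heads)
```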
725
+
726
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
727
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
728
+ def forward(
729
+ self,
730
+ input_ids: Optional[torch.LongTensor] = None,
731
+ bbox: Optional[torch.LongTensor] = None,
732
+ attention_mask: Optional[torch.FloatTensor] = None,
733
+ token_type_ids: Optional[torch.LongTensor] = None,
734
+ position_ids: Optional[torch.LongTensor] = None,
735
+ head_mask: Optional[torch.FloatTensor] = None,
736
+ inputs_embeds: Optional[torch.FloatTensor] = None,
737
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
738
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
739
+ output_attentions: Optional[bool] = None,
740
+ output_hidden_states: Optional[bool] = None,
741
+ return_dict: Optional[bool] = None,
742
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
743
+ r"""
744
+ Returns:
745
+
746
+ Examples:
747
+
748
+ ```python
749
+ >>> from transformers import AutoTokenizer, LayoutLMModel
750
+ >>> import torch
751
+
752
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
753
+ >>> model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
754
+
755
+ >>> words = ["Hello", "world"]
756
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
757
+
758
+ >>> token_boxes = []
759
+ >>> for word, box in zip(words, normalized_word_boxes):
760
+ ... word_tokens = tokenizer.tokenize(word)
761
+ ... token_boxes.extend([box] * len(word_tokens))
762
+ >>> # add bounding boxes of cls + sep tokens
763
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
764
+
765
+ >>> encoding = tokenizer(" ".join(words), return_tensors="pt")
766
+ >>> input_ids = encoding["input_ids"]
767
+ >>> attention_mask = encoding["attention_mask"]
768
+ >>> token_type_ids = encoding["token_type_ids"]
769
+ >>> bbox = torch.tensor([token_boxes])
770
+
771
+ >>> outputs = model(
772
+ ... input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids
773
+ ... )
774
+
775
+ >>> last_hidden_states = outputs.last_hidden_state
776
+ ```"""
777
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
778
+ output_hidden_states = (
779
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
780
+ )
781
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
782
+
783
+ if input_ids is not None and inputs_embeds is not None:
784
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
785
+ elif input_ids is not None:
786
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
787
+ input_shape = input_ids.size()
788
+ elif inputs_embeds is not None:
789
+ input_shape = inputs_embeds.size()[:-1]
790
+ else:
791
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
792
+
793
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
794
+
795
+ if attention_mask is None:
796
+ attention_mask = torch.ones(input_shape, device=device)
797
+ if token_type_ids is None:
798
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
799
+
800
+ if bbox is None:
801
+ bbox = torch.zeros(input_shape + (4,), dtype=torch.long, device=device)
802
+
803
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
804
+
805
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
806
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
807
+
808
+ if head_mask is not None:
809
+ if head_mask.dim() == 1:
810
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
811
+ head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
812
+ elif head_mask.dim() == 2:
813
+ head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
814
+ head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
815
+ else:
816
+ head_mask = [None] * self.config.num_hidden_layers
817
+
818
+ embedding_output = self.embeddings(
819
+ input_ids=input_ids,
820
+ bbox=bbox,
821
+ position_ids=position_ids,
822
+ token_type_ids=token_type_ids,
823
+ inputs_embeds=inputs_embeds,
824
+ )
825
+ encoder_outputs = self.encoder(
826
+ embedding_output,
827
+ extended_attention_mask,
828
+ head_mask=head_mask,
829
+ output_attentions=output_attentions,
830
+ output_hidden_states=output_hidden_states,
831
+ return_dict=return_dict,
832
+ )
833
+ sequence_output = encoder_outputs[0]
834
+ pooled_output = self.pooler(sequence_output)
835
+
836
+ if not return_dict:
837
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
838
+
839
+ return BaseModelOutputWithPoolingAndCrossAttentions(
840
+ last_hidden_state=sequence_output,
841
+ pooler_output=pooled_output,
842
+ hidden_states=encoder_outputs.hidden_states,
843
+ attentions=encoder_outputs.attentions,
844
+ cross_attentions=encoder_outputs.cross_attentions,
845
+ )
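A standalone sketch of the additive attention mask built in the forward pass above: padding positions (mask value 0) are turned into a very large negative bias so that softmax assigns them essentially zero weight. The random scores below are placeholders, not model outputs.

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]], dtype=torch.float)

# (batch_size, seq_len) -> (batch_size, 1, 1, seq_len), then 0 -> large negative bias.
extended = attention_mask[:, None, None, :]
extended = (1.0 - extended) * torch.finfo(torch.float32).min

scores = torch.randn(1, 1, 5, 5)  # placeholder attention scores
probs = torch.softmax(scores + extended, dim=-1)
print(probs[0, 0, 0])  # the last two (padded) positions get ~0 probability
```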
846
+
847
+
848
+ @add_start_docstrings("""LayoutLM Model with a `language modeling` head on top.""", LAYOUTLM_START_DOCSTRING)
849
+ class LayoutLMForMaskedLM(LayoutLMPreTrainedModel):
850
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
851
+
852
+ def __init__(self, config):
853
+ super().__init__(config)
854
+
855
+ self.layoutlm = LayoutLMModel(config)
856
+ self.cls = LayoutLMOnlyMLMHead(config)
857
+
858
+ # Initialize weights and apply final processing
859
+ self.post_init()
860
+
861
+ def get_input_embeddings(self):
862
+ return self.layoutlm.embeddings.word_embeddings
863
+
864
+ def get_output_embeddings(self):
865
+ return self.cls.predictions.decoder
866
+
867
+ def set_output_embeddings(self, new_embeddings):
868
+ self.cls.predictions.decoder = new_embeddings
869
+
870
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
871
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
872
+ def forward(
873
+ self,
874
+ input_ids: Optional[torch.LongTensor] = None,
875
+ bbox: Optional[torch.LongTensor] = None,
876
+ attention_mask: Optional[torch.FloatTensor] = None,
877
+ token_type_ids: Optional[torch.LongTensor] = None,
878
+ position_ids: Optional[torch.LongTensor] = None,
879
+ head_mask: Optional[torch.FloatTensor] = None,
880
+ inputs_embeds: Optional[torch.FloatTensor] = None,
881
+ labels: Optional[torch.LongTensor] = None,
882
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
883
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
884
+ output_attentions: Optional[bool] = None,
885
+ output_hidden_states: Optional[bool] = None,
886
+ return_dict: Optional[bool] = None,
887
+ ) -> Union[Tuple, MaskedLMOutput]:
888
+ r"""
889
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
890
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
891
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
892
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
893
+
894
+ Returns:
895
+
896
+ Examples:
897
+
898
+ ```python
899
+ >>> from transformers import AutoTokenizer, LayoutLMForMaskedLM
900
+ >>> import torch
901
+
902
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
903
+ >>> model = LayoutLMForMaskedLM.from_pretrained("microsoft/layoutlm-base-uncased")
904
+
905
+ >>> words = ["Hello", "[MASK]"]
906
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
907
+
908
+ >>> token_boxes = []
909
+ >>> for word, box in zip(words, normalized_word_boxes):
910
+ ... word_tokens = tokenizer.tokenize(word)
911
+ ... token_boxes.extend([box] * len(word_tokens))
912
+ >>> # add bounding boxes of cls + sep tokens
913
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
914
+
915
+ >>> encoding = tokenizer(" ".join(words), return_tensors="pt")
916
+ >>> input_ids = encoding["input_ids"]
917
+ >>> attention_mask = encoding["attention_mask"]
918
+ >>> token_type_ids = encoding["token_type_ids"]
919
+ >>> bbox = torch.tensor([token_boxes])
920
+
921
+ >>> labels = tokenizer("Hello world", return_tensors="pt")["input_ids"]
922
+
923
+ >>> outputs = model(
924
+ ... input_ids=input_ids,
925
+ ... bbox=bbox,
926
+ ... attention_mask=attention_mask,
927
+ ... token_type_ids=token_type_ids,
928
+ ... labels=labels,
929
+ ... )
930
+
931
+ >>> loss = outputs.loss
932
+ ```"""
933
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
934
+
935
+ outputs = self.layoutlm(
936
+ input_ids,
937
+ bbox,
938
+ attention_mask=attention_mask,
939
+ token_type_ids=token_type_ids,
940
+ position_ids=position_ids,
941
+ head_mask=head_mask,
942
+ inputs_embeds=inputs_embeds,
943
+ encoder_hidden_states=encoder_hidden_states,
944
+ encoder_attention_mask=encoder_attention_mask,
945
+ output_attentions=output_attentions,
946
+ output_hidden_states=output_hidden_states,
947
+ return_dict=return_dict,
948
+ )
949
+
950
+ sequence_output = outputs[0]
951
+ prediction_scores = self.cls(sequence_output)
952
+
953
+ masked_lm_loss = None
954
+ if labels is not None:
955
+ loss_fct = CrossEntropyLoss()
956
+ masked_lm_loss = loss_fct(
957
+ prediction_scores.view(-1, self.config.vocab_size),
958
+ labels.view(-1),
959
+ )
960
+
961
+ if not return_dict:
962
+ output = (prediction_scores,) + outputs[2:]
963
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
964
+
965
+ return MaskedLMOutput(
966
+ loss=masked_lm_loss,
967
+ logits=prediction_scores,
968
+ hidden_states=outputs.hidden_states,
969
+ attentions=outputs.attentions,
970
+ )
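A small sketch of how `labels` for the masked-LM head are usually built, following the `-100` convention described in the docstring above; the masked position chosen here is arbitrary.

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")

encoding = tokenizer("Hello world", return_tensors="pt")
input_ids = encoding["input_ids"].clone()
labels = input_ids.clone()

# Replace one token with [MASK] and ignore every other position in the loss.
masked_index = 2  # arbitrary position for illustration
input_ids[0, masked_index] = tokenizer.mask_token_id
labels[input_ids != tokenizer.mask_token_id] = -100
```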
971
+
972
+
973
+ @add_start_docstrings(
974
+ """
975
+ LayoutLM Model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for
976
+ document image classification tasks such as the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
977
+ """,
978
+ LAYOUTLM_START_DOCSTRING,
979
+ )
980
+ class LayoutLMForSequenceClassification(LayoutLMPreTrainedModel):
981
+ def __init__(self, config):
982
+ super().__init__(config)
983
+ self.num_labels = config.num_labels
984
+ self.layoutlm = LayoutLMModel(config)
985
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
986
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
987
+
988
+ # Initialize weights and apply final processing
989
+ self.post_init()
990
+
991
+ def get_input_embeddings(self):
992
+ return self.layoutlm.embeddings.word_embeddings
993
+
994
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
995
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
996
+ def forward(
997
+ self,
998
+ input_ids: Optional[torch.LongTensor] = None,
999
+ bbox: Optional[torch.LongTensor] = None,
1000
+ attention_mask: Optional[torch.FloatTensor] = None,
1001
+ token_type_ids: Optional[torch.LongTensor] = None,
1002
+ position_ids: Optional[torch.LongTensor] = None,
1003
+ head_mask: Optional[torch.FloatTensor] = None,
1004
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1005
+ labels: Optional[torch.LongTensor] = None,
1006
+ output_attentions: Optional[bool] = None,
1007
+ output_hidden_states: Optional[bool] = None,
1008
+ return_dict: Optional[bool] = None,
1009
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1010
+ r"""
1011
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1012
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1013
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1014
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1015
+
1016
+ Returns:
1017
+
1018
+ Examples:
1019
+
1020
+ ```python
1021
+ >>> from transformers import AutoTokenizer, LayoutLMForSequenceClassification
1022
+ >>> import torch
1023
+
1024
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1025
+ >>> model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased")
1026
+
1027
+ >>> words = ["Hello", "world"]
1028
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1029
+
1030
+ >>> token_boxes = []
1031
+ >>> for word, box in zip(words, normalized_word_boxes):
1032
+ ... word_tokens = tokenizer.tokenize(word)
1033
+ ... token_boxes.extend([box] * len(word_tokens))
1034
+ >>> # add bounding boxes of cls + sep tokens
1035
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1036
+
1037
+ >>> encoding = tokenizer(" ".join(words), return_tensors="pt")
1038
+ >>> input_ids = encoding["input_ids"]
1039
+ >>> attention_mask = encoding["attention_mask"]
1040
+ >>> token_type_ids = encoding["token_type_ids"]
1041
+ >>> bbox = torch.tensor([token_boxes])
1042
+ >>> sequence_label = torch.tensor([1])
1043
+
1044
+ >>> outputs = model(
1045
+ ... input_ids=input_ids,
1046
+ ... bbox=bbox,
1047
+ ... attention_mask=attention_mask,
1048
+ ... token_type_ids=token_type_ids,
1049
+ ... labels=sequence_label,
1050
+ ... )
1051
+
1052
+ >>> loss = outputs.loss
1053
+ >>> logits = outputs.logits
1054
+ ```"""
1055
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1056
+
1057
+ outputs = self.layoutlm(
1058
+ input_ids=input_ids,
1059
+ bbox=bbox,
1060
+ attention_mask=attention_mask,
1061
+ token_type_ids=token_type_ids,
1062
+ position_ids=position_ids,
1063
+ head_mask=head_mask,
1064
+ inputs_embeds=inputs_embeds,
1065
+ output_attentions=output_attentions,
1066
+ output_hidden_states=output_hidden_states,
1067
+ return_dict=return_dict,
1068
+ )
1069
+
1070
+ pooled_output = outputs[1]
1071
+
1072
+ pooled_output = self.dropout(pooled_output)
1073
+ logits = self.classifier(pooled_output)
1074
+
1075
+ loss = None
1076
+ if labels is not None:
1077
+ if self.config.problem_type is None:
1078
+ if self.num_labels == 1:
1079
+ self.config.problem_type = "regression"
1080
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1081
+ self.config.problem_type = "single_label_classification"
1082
+ else:
1083
+ self.config.problem_type = "multi_label_classification"
1084
+
1085
+ if self.config.problem_type == "regression":
1086
+ loss_fct = MSELoss()
1087
+ if self.num_labels == 1:
1088
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1089
+ else:
1090
+ loss = loss_fct(logits, labels)
1091
+ elif self.config.problem_type == "single_label_classification":
1092
+ loss_fct = CrossEntropyLoss()
1093
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1094
+ elif self.config.problem_type == "multi_label_classification":
1095
+ loss_fct = BCEWithLogitsLoss()
1096
+ loss = loss_fct(logits, labels)
1097
+ if not return_dict:
1098
+ output = (logits,) + outputs[2:]
1099
+ return ((loss,) + output) if loss is not None else output
1100
+
1101
+ return SequenceClassifierOutput(
1102
+ loss=loss,
1103
+ logits=logits,
1104
+ hidden_states=outputs.hidden_states,
1105
+ attentions=outputs.attentions,
1106
+ )
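The `problem_type` branch above picks the loss automatically from the labels, but it can also be fixed on the config. A minimal sketch with a tiny randomly initialized model (the config values and inputs are illustrative) using the multi-label path, which expects float multi-hot targets and `BCEWithLogitsLoss`.

```python
import torch
from transformers import LayoutLMConfig, LayoutLMForSequenceClassification

config = LayoutLMConfig(
    hidden_size=64, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=128, num_labels=3, problem_type="multi_label_classification",
)
model = LayoutLMForSequenceClassification(config)

input_ids = torch.tensor([[101, 7592, 2088, 102]])  # [CLS] hello world [SEP]
bbox = torch.zeros(1, 4, 4, dtype=torch.long)       # dummy boxes
labels = torch.tensor([[1.0, 0.0, 1.0]])            # multi-hot targets

loss = model(input_ids=input_ids, bbox=bbox, labels=labels).loss
```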
1107
+
1108
+
1109
+ @add_start_docstrings(
1110
+ """
1111
+ LayoutLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1112
+ sequence labeling (information extraction) tasks such as the [FUNSD](https://guillaumejaume.github.io/FUNSD/)
1113
+ dataset and the [SROIE](https://rrc.cvc.uab.es/?ch=13) dataset.
1114
+ """,
1115
+ LAYOUTLM_START_DOCSTRING,
1116
+ )
1117
+ class LayoutLMForTokenClassification(LayoutLMPreTrainedModel):
1118
+ def __init__(self, config):
1119
+ super().__init__(config)
1120
+ self.num_labels = config.num_labels
1121
+ self.layoutlm = LayoutLMModel(config)
1122
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1123
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1124
+
1125
+ # Initialize weights and apply final processing
1126
+ self.post_init()
1127
+
1128
+ def get_input_embeddings(self):
1129
+ return self.layoutlm.embeddings.word_embeddings
1130
+
1131
+ @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1132
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1133
+ def forward(
1134
+ self,
1135
+ input_ids: Optional[torch.LongTensor] = None,
1136
+ bbox: Optional[torch.LongTensor] = None,
1137
+ attention_mask: Optional[torch.FloatTensor] = None,
1138
+ token_type_ids: Optional[torch.LongTensor] = None,
1139
+ position_ids: Optional[torch.LongTensor] = None,
1140
+ head_mask: Optional[torch.FloatTensor] = None,
1141
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1142
+ labels: Optional[torch.LongTensor] = None,
1143
+ output_attentions: Optional[bool] = None,
1144
+ output_hidden_states: Optional[bool] = None,
1145
+ return_dict: Optional[bool] = None,
1146
+ ) -> Union[Tuple, TokenClassifierOutput]:
1147
+ r"""
1148
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1149
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1150
+
1151
+ Returns:
1152
+
1153
+ Examples:
1154
+
1155
+ ```python
1156
+ >>> from transformers import AutoTokenizer, LayoutLMForTokenClassification
1157
+ >>> import torch
1158
+
1159
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
1160
+ >>> model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased")
1161
+
1162
+ >>> words = ["Hello", "world"]
1163
+ >>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]
1164
+
1165
+ >>> token_boxes = []
1166
+ >>> for word, box in zip(words, normalized_word_boxes):
1167
+ ... word_tokens = tokenizer.tokenize(word)
1168
+ ... token_boxes.extend([box] * len(word_tokens))
1169
+ >>> # add bounding boxes of cls + sep tokens
1170
+ >>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]
1171
+
1172
+ >>> encoding = tokenizer(" ".join(words), return_tensors="pt")
1173
+ >>> input_ids = encoding["input_ids"]
1174
+ >>> attention_mask = encoding["attention_mask"]
1175
+ >>> token_type_ids = encoding["token_type_ids"]
1176
+ >>> bbox = torch.tensor([token_boxes])
1177
+ >>> token_labels = torch.tensor([1, 1, 0, 0]).unsqueeze(0) # batch size of 1
1178
+
1179
+ >>> outputs = model(
1180
+ ... input_ids=input_ids,
1181
+ ... bbox=bbox,
1182
+ ... attention_mask=attention_mask,
1183
+ ... token_type_ids=token_type_ids,
1184
+ ... labels=token_labels,
1185
+ ... )
1186
+
1187
+ >>> loss = outputs.loss
1188
+ >>> logits = outputs.logits
1189
+ ```"""
1190
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1191
+
1192
+ outputs = self.layoutlm(
1193
+ input_ids=input_ids,
1194
+ bbox=bbox,
1195
+ attention_mask=attention_mask,
1196
+ token_type_ids=token_type_ids,
1197
+ position_ids=position_ids,
1198
+ head_mask=head_mask,
1199
+ inputs_embeds=inputs_embeds,
1200
+ output_attentions=output_attentions,
1201
+ output_hidden_states=output_hidden_states,
1202
+ return_dict=return_dict,
1203
+ )
1204
+
1205
+ sequence_output = outputs[0]
1206
+
1207
+ sequence_output = self.dropout(sequence_output)
1208
+ logits = self.classifier(sequence_output)
1209
+
1210
+ loss = None
1211
+ if labels is not None:
1212
+ loss_fct = CrossEntropyLoss()
1213
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1214
+
1215
+ if not return_dict:
1216
+ output = (logits,) + outputs[2:]
1217
+ return ((loss,) + output) if loss is not None else output
1218
+
1219
+ return TokenClassifierOutput(
1220
+ loss=loss,
1221
+ logits=logits,
1222
+ hidden_states=outputs.hidden_states,
1223
+ attentions=outputs.attentions,
1224
+ )
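For token classification, word-level labels have to be expanded to token level, and positions that should not contribute to the loss (special tokens, for instance) conventionally get `-100`. A minimal sketch, assuming one label id per word; the label values are illustrative.

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")

words = ["Hello", "world"]
word_labels = [1, 0]  # illustrative label ids, one per word

encoding = tokenizer(words, is_split_into_words=True, return_tensors="pt")

labels = []
for word_id in encoding.word_ids(0):
    # Special tokens have no word id; -100 makes CrossEntropyLoss ignore them.
    labels.append(-100 if word_id is None else word_labels[word_id])
labels = torch.tensor([labels])
```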
1225
+
1226
+
1227
+ @add_start_docstrings(
1228
+ """
1229
+ LayoutLM Model with a span classification head on top for extractive question-answering tasks such as
1230
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the final hidden-states output to compute `span
1231
+ start logits` and `span end logits`).
1232
+ """,
1233
+ LAYOUTLM_START_DOCSTRING,
1234
+ )
1235
+ class LayoutLMForQuestionAnswering(LayoutLMPreTrainedModel):
1236
+ def __init__(self, config, has_visual_segment_embedding=True):
1237
+ super().__init__(config)
1238
+ self.num_labels = config.num_labels
1239
+
1240
+ self.layoutlm = LayoutLMModel(config)
1241
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1242
+
1243
+ # Initialize weights and apply final processing
1244
+ self.post_init()
1245
+
1246
+ def get_input_embeddings(self):
1247
+ return self.layoutlm.embeddings.word_embeddings
1248
+
1249
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1250
+ def forward(
1251
+ self,
1252
+ input_ids: Optional[torch.LongTensor] = None,
1253
+ bbox: Optional[torch.LongTensor] = None,
1254
+ attention_mask: Optional[torch.FloatTensor] = None,
1255
+ token_type_ids: Optional[torch.LongTensor] = None,
1256
+ position_ids: Optional[torch.LongTensor] = None,
1257
+ head_mask: Optional[torch.FloatTensor] = None,
1258
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1259
+ start_positions: Optional[torch.LongTensor] = None,
1260
+ end_positions: Optional[torch.LongTensor] = None,
1261
+ output_attentions: Optional[bool] = None,
1262
+ output_hidden_states: Optional[bool] = None,
1263
+ return_dict: Optional[bool] = None,
1264
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1265
+ r"""
1266
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1267
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1268
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
1269
+ are not taken into account for computing the loss.
1270
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1271
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1272
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
1274
+
1275
+ Returns:
1276
+
1277
+ Example:
1278
+
1279
+ In the example below, we prepare a question + context pair for the LayoutLM model. It will give us a prediction
1280
+ of what it thinks the answer is (the span of the answer within the text parsed from the image).
1281
+
1282
+ ```python
1283
+ >>> from transformers import AutoTokenizer, LayoutLMForQuestionAnswering
1284
+ >>> from datasets import load_dataset
1285
+ >>> import torch
1286
+
1287
+ >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True)
1288
+ >>> model = LayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa", revision="1e3ebac")
1289
+
1290
+ >>> dataset = load_dataset("nielsr/funsd", split="train")
1291
+ >>> example = dataset[0]
1292
+ >>> question = "what's his name?"
1293
+ >>> words = example["words"]
1294
+ >>> boxes = example["bboxes"]
1295
+
1296
+ >>> encoding = tokenizer(
1297
+ ... question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="pt"
1298
+ ... )
1299
+ >>> bbox = []
1300
+ >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)):
1301
+ ... if s == 1:
1302
+ ... bbox.append(boxes[w])
1303
+ ... elif i == tokenizer.sep_token_id:
1304
+ ... bbox.append([1000] * 4)
1305
+ ... else:
1306
+ ... bbox.append([0] * 4)
1307
+ >>> encoding["bbox"] = torch.tensor([bbox])
1308
+
1309
+ >>> word_ids = encoding.word_ids(0)
1310
+ >>> outputs = model(**encoding)
1311
+ >>> loss = outputs.loss
1312
+ >>> start_scores = outputs.start_logits
1313
+ >>> end_scores = outputs.end_logits
1314
+ >>> start, end = word_ids[start_scores.argmax(-1)], word_ids[end_scores.argmax(-1)]
1315
+ >>> print(" ".join(words[start : end + 1]))
1316
+ M. Hamann P. Harper, P. Martinez
1317
+ ```"""
1318
+
1319
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1320
+
1321
+ outputs = self.layoutlm(
1322
+ input_ids=input_ids,
1323
+ bbox=bbox,
1324
+ attention_mask=attention_mask,
1325
+ token_type_ids=token_type_ids,
1326
+ position_ids=position_ids,
1327
+ head_mask=head_mask,
1328
+ inputs_embeds=inputs_embeds,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ )
1333
+
1334
+ sequence_output = outputs[0]
1335
+
1336
+ logits = self.qa_outputs(sequence_output)
1337
+ start_logits, end_logits = logits.split(1, dim=-1)
1338
+ start_logits = start_logits.squeeze(-1).contiguous()
1339
+ end_logits = end_logits.squeeze(-1).contiguous()
1340
+
1341
+ total_loss = None
1342
+ if start_positions is not None and end_positions is not None:
1343
+ # If we are on multi-GPU, splitting adds a dimension
1344
+ if len(start_positions.size()) > 1:
1345
+ start_positions = start_positions.squeeze(-1)
1346
+ if len(end_positions.size()) > 1:
1347
+ end_positions = end_positions.squeeze(-1)
1348
+ # Sometimes the start/end positions are outside our model inputs; we ignore these terms
1349
+ ignored_index = start_logits.size(1)
1350
+ start_positions = start_positions.clamp(0, ignored_index)
1351
+ end_positions = end_positions.clamp(0, ignored_index)
1352
+
1353
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1354
+ start_loss = loss_fct(start_logits, start_positions)
1355
+ end_loss = loss_fct(end_logits, end_positions)
1356
+ total_loss = (start_loss + end_loss) / 2
1357
+
1358
+ if not return_dict:
1359
+ output = (start_logits, end_logits) + outputs[2:]
1360
+ return ((total_loss,) + output) if total_loss is not None else output
1361
+
1362
+ return QuestionAnsweringModelOutput(
1363
+ loss=total_loss,
1364
+ start_logits=start_logits,
1365
+ end_logits=end_logits,
1366
+ hidden_states=outputs.hidden_states,
1367
+ attentions=outputs.attentions,
1368
+ )
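The docstring example above takes the two argmaxes independently, which can occasionally yield `end < start`. A small post-processing sketch (not part of the model) that instead scores valid `(start, end)` pairs jointly, under an assumed maximum answer length.

```python
import torch

def best_span(start_logits, end_logits, max_answer_length=30):
    """Return the (start, end) pair with the highest summed logit such that start <= end."""
    seq_len = start_logits.size(0)
    idx = torch.arange(seq_len)
    # Valid pairs: end >= start and span no longer than max_answer_length tokens.
    valid = (idx[None, :] >= idx[:, None]) & (idx[None, :] - idx[:, None] < max_answer_length)
    scores = start_logits[:, None] + end_logits[None, :]
    scores = scores.masked_fill(~valid, float("-inf"))
    flat = scores.argmax()
    return (flat // seq_len).item(), (flat % seq_len).item()

# e.g. with the outputs of the example above:
# start, end = best_span(outputs.start_logits[0], outputs.end_logits[0])
```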