applied-ai-018 committed on
Commit ee92357 · verified · 1 Parent(s): 51713a2

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__init__.py +179 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/configuration_albert.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py +167 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py +63 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py +1382 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py +1121 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py +1564 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py +346 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py +210 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__init__.py +71 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/configuration_blip_2.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/convert_blip_2_original_to_pytorch.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/modeling_blip_2.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/configuration_blip_2.py +355 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py +291 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py +1853 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/processing_blip_2.py +155 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py +86 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/image_processing_pix2struct.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/modeling_pix2struct.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/processing_pix2struct.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py +387 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py +155 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py +460 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py +1786 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py +163 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__init__.py +105 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py +309 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/convert_sam_to_hf.py +250 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py +1496 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/modeling_sam.py +1415 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py +1656 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__init__.py ADDED
@@ -0,0 +1,179 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_sentencepiece_available,
22
+ is_tf_available,
23
+ is_tokenizers_available,
24
+ is_torch_available,
25
+ )
26
+
27
+
28
+ _import_structure = {
29
+ "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
30
+ }
31
+
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
39
+
40
+ try:
41
+ if not is_tokenizers_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ _import_structure["modeling_albert"] = [
55
+ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
56
+ "AlbertForMaskedLM",
57
+ "AlbertForMultipleChoice",
58
+ "AlbertForPreTraining",
59
+ "AlbertForQuestionAnswering",
60
+ "AlbertForSequenceClassification",
61
+ "AlbertForTokenClassification",
62
+ "AlbertModel",
63
+ "AlbertPreTrainedModel",
64
+ "load_tf_weights_in_albert",
65
+ ]
66
+
67
+ try:
68
+ if not is_tf_available():
69
+ raise OptionalDependencyNotAvailable()
70
+ except OptionalDependencyNotAvailable:
71
+ pass
72
+ else:
73
+ _import_structure["modeling_tf_albert"] = [
74
+ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
75
+ "TFAlbertForMaskedLM",
76
+ "TFAlbertForMultipleChoice",
77
+ "TFAlbertForPreTraining",
78
+ "TFAlbertForQuestionAnswering",
79
+ "TFAlbertForSequenceClassification",
80
+ "TFAlbertForTokenClassification",
81
+ "TFAlbertMainLayer",
82
+ "TFAlbertModel",
83
+ "TFAlbertPreTrainedModel",
84
+ ]
85
+
86
+ try:
87
+ if not is_flax_available():
88
+ raise OptionalDependencyNotAvailable()
89
+ except OptionalDependencyNotAvailable:
90
+ pass
91
+ else:
92
+ _import_structure["modeling_flax_albert"] = [
93
+ "FlaxAlbertForMaskedLM",
94
+ "FlaxAlbertForMultipleChoice",
95
+ "FlaxAlbertForPreTraining",
96
+ "FlaxAlbertForQuestionAnswering",
97
+ "FlaxAlbertForSequenceClassification",
98
+ "FlaxAlbertForTokenClassification",
99
+ "FlaxAlbertModel",
100
+ "FlaxAlbertPreTrainedModel",
101
+ ]
102
+
103
+ if TYPE_CHECKING:
104
+ from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
105
+
106
+ try:
107
+ if not is_sentencepiece_available():
108
+ raise OptionalDependencyNotAvailable()
109
+ except OptionalDependencyNotAvailable:
110
+ pass
111
+ else:
112
+ from .tokenization_albert import AlbertTokenizer
113
+
114
+ try:
115
+ if not is_tokenizers_available():
116
+ raise OptionalDependencyNotAvailable()
117
+ except OptionalDependencyNotAvailable:
118
+ pass
119
+ else:
120
+ from .tokenization_albert_fast import AlbertTokenizerFast
121
+
122
+ try:
123
+ if not is_torch_available():
124
+ raise OptionalDependencyNotAvailable()
125
+ except OptionalDependencyNotAvailable:
126
+ pass
127
+ else:
128
+ from .modeling_albert import (
129
+ ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
130
+ AlbertForMaskedLM,
131
+ AlbertForMultipleChoice,
132
+ AlbertForPreTraining,
133
+ AlbertForQuestionAnswering,
134
+ AlbertForSequenceClassification,
135
+ AlbertForTokenClassification,
136
+ AlbertModel,
137
+ AlbertPreTrainedModel,
138
+ load_tf_weights_in_albert,
139
+ )
140
+
141
+ try:
142
+ if not is_tf_available():
143
+ raise OptionalDependencyNotAvailable()
144
+ except OptionalDependencyNotAvailable:
145
+ pass
146
+ else:
147
+ from .modeling_tf_albert import (
148
+ TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
149
+ TFAlbertForMaskedLM,
150
+ TFAlbertForMultipleChoice,
151
+ TFAlbertForPreTraining,
152
+ TFAlbertForQuestionAnswering,
153
+ TFAlbertForSequenceClassification,
154
+ TFAlbertForTokenClassification,
155
+ TFAlbertMainLayer,
156
+ TFAlbertModel,
157
+ TFAlbertPreTrainedModel,
158
+ )
159
+
160
+ try:
161
+ if not is_flax_available():
162
+ raise OptionalDependencyNotAvailable()
163
+ except OptionalDependencyNotAvailable:
164
+ pass
165
+ else:
166
+ from .modeling_flax_albert import (
167
+ FlaxAlbertForMaskedLM,
168
+ FlaxAlbertForMultipleChoice,
169
+ FlaxAlbertForPreTraining,
170
+ FlaxAlbertForQuestionAnswering,
171
+ FlaxAlbertForSequenceClassification,
172
+ FlaxAlbertForTokenClassification,
173
+ FlaxAlbertModel,
174
+ FlaxAlbertPreTrainedModel,
175
+ )
176
+ else:
177
+ import sys
178
+
179
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
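Editor's note (not part of the uploaded file): the `_LazyModule` registration above defers the framework-specific submodule imports until an attribute is first accessed. A minimal usage sketch, assuming a working `transformers` install with PyTorch available:

```python
# Attribute access on the package triggers the lazy import of
# configuration_albert / modeling_albert declared in _import_structure.
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
model = AlbertModel(config)  # randomly initialized weights, nothing is downloaded
print(type(model).__module__)  # transformers.models.albert.modeling_albert
```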
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/configuration_albert.cpython-310.pyc ADDED
Binary file (7.23 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/convert_albert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_albert.cpython-310.pyc ADDED
Binary file (41.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/modeling_tf_albert.cpython-310.pyc ADDED
Binary file (47.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert.cpython-310.pyc ADDED
Binary file (12.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/__pycache__/tokenization_albert_fast.cpython-310.pyc ADDED
Binary file (7.76 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/configuration_albert.py ADDED
@@ -0,0 +1,167 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ALBERT model configuration"""
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ..deprecated._archive_maps import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
23
+
24
+
25
+ class AlbertConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used
28
+ to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating
29
+ a configuration with the defaults will yield a similar configuration to that of the ALBERT
30
+ [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+ Args:
36
+ vocab_size (`int`, *optional*, defaults to 30000):
37
+ Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
38
+ `inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
39
+ embedding_size (`int`, *optional*, defaults to 128):
40
+ Dimensionality of vocabulary embeddings.
41
+ hidden_size (`int`, *optional*, defaults to 4096):
42
+ Dimensionality of the encoder layers and the pooler layer.
43
+ num_hidden_layers (`int`, *optional*, defaults to 12):
44
+ Number of hidden layers in the Transformer encoder.
45
+ num_hidden_groups (`int`, *optional*, defaults to 1):
46
+ Number of groups for the hidden layers, parameters in the same group are shared.
47
+ num_attention_heads (`int`, *optional*, defaults to 64):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 16384):
50
+ The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
51
+ inner_group_num (`int`, *optional*, defaults to 1):
52
+ The number of inner repetition of attention and ffn.
53
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 512):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ (e.g., 512 or 1024 or 2048).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
70
+ The dropout ratio for attached classifiers.
71
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
72
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
73
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
74
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
75
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
76
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
77
+ pad_token_id (`int`, *optional*, defaults to 0):
78
+ Padding token id.
79
+ bos_token_id (`int`, *optional*, defaults to 2):
80
+ Beginning of stream token id.
81
+ eos_token_id (`int`, *optional*, defaults to 3):
82
+ End of stream token id.
83
+
84
+ Examples:
85
+
86
+ ```python
87
+ >>> from transformers import AlbertConfig, AlbertModel
88
+
89
+ >>> # Initializing an ALBERT-xxlarge style configuration
90
+ >>> albert_xxlarge_configuration = AlbertConfig()
91
+
92
+ >>> # Initializing an ALBERT-base style configuration
93
+ >>> albert_base_configuration = AlbertConfig(
94
+ ... hidden_size=768,
95
+ ... num_attention_heads=12,
96
+ ... intermediate_size=3072,
97
+ ... )
98
+
99
+ >>> # Initializing a model (with random weights) from the ALBERT-base style configuration
100
+ >>> model = AlbertModel(albert_xxlarge_configuration)
101
+
102
+ >>> # Accessing the model configuration
103
+ >>> configuration = model.config
104
+ ```"""
105
+
106
+ model_type = "albert"
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_size=30000,
111
+ embedding_size=128,
112
+ hidden_size=4096,
113
+ num_hidden_layers=12,
114
+ num_hidden_groups=1,
115
+ num_attention_heads=64,
116
+ intermediate_size=16384,
117
+ inner_group_num=1,
118
+ hidden_act="gelu_new",
119
+ hidden_dropout_prob=0,
120
+ attention_probs_dropout_prob=0,
121
+ max_position_embeddings=512,
122
+ type_vocab_size=2,
123
+ initializer_range=0.02,
124
+ layer_norm_eps=1e-12,
125
+ classifier_dropout_prob=0.1,
126
+ position_embedding_type="absolute",
127
+ pad_token_id=0,
128
+ bos_token_id=2,
129
+ eos_token_id=3,
130
+ **kwargs,
131
+ ):
132
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
133
+
134
+ self.vocab_size = vocab_size
135
+ self.embedding_size = embedding_size
136
+ self.hidden_size = hidden_size
137
+ self.num_hidden_layers = num_hidden_layers
138
+ self.num_hidden_groups = num_hidden_groups
139
+ self.num_attention_heads = num_attention_heads
140
+ self.inner_group_num = inner_group_num
141
+ self.hidden_act = hidden_act
142
+ self.intermediate_size = intermediate_size
143
+ self.hidden_dropout_prob = hidden_dropout_prob
144
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
145
+ self.max_position_embeddings = max_position_embeddings
146
+ self.type_vocab_size = type_vocab_size
147
+ self.initializer_range = initializer_range
148
+ self.layer_norm_eps = layer_norm_eps
149
+ self.classifier_dropout_prob = classifier_dropout_prob
150
+ self.position_embedding_type = position_embedding_type
151
+
152
+
153
+ # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert
154
+ class AlbertOnnxConfig(OnnxConfig):
155
+ @property
156
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
157
+ if self.task == "multiple-choice":
158
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
159
+ else:
160
+ dynamic_axis = {0: "batch", 1: "sequence"}
161
+ return OrderedDict(
162
+ [
163
+ ("input_ids", dynamic_axis),
164
+ ("attention_mask", dynamic_axis),
165
+ ("token_type_ids", dynamic_axis),
166
+ ]
167
+ )
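Editor's note: a small sketch of how the `AlbertOnnxConfig.inputs` property above can be inspected; the constructor call assumes the generic `OnnxConfig(config, task=...)` signature, and the printed mapping is read directly off the property body:

```python
from transformers import AlbertConfig
from transformers.models.albert.configuration_albert import AlbertOnnxConfig

onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
```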
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,63 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ALBERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from ...utils import logging
23
+ from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = AlbertConfig.from_json_file(albert_config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = AlbertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_albert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--albert_config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help=(
55
+ "The config json file corresponding to the pre-trained ALBERT model. \n"
56
+ "This specifies the model architecture."
57
+ ),
58
+ )
59
+ parser.add_argument(
60
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
61
+ )
62
+ args = parser.parse_args()
63
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
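Editor's note: an illustrative call of the converter defined above; the checkpoint and config paths are placeholders, not files shipped in this upload:

```python
# Equivalent CLI:
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base_pytorch_model.bin
from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./albert_base/model.ckpt-best",     # placeholder TF checkpoint
    albert_config_file="./albert_base/albert_config.json",  # placeholder ALBERT config
    pytorch_dump_path="./albert_base_pytorch_model.bin",    # output PyTorch state dict
)
```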
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_albert.py ADDED
@@ -0,0 +1,1382 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ALBERT model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPooling,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import (
39
+ ModelOutput,
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ logging,
44
+ replace_return_docstrings,
45
+ )
46
+ from .configuration_albert import AlbertConfig
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
52
+ _CONFIG_FOR_DOC = "AlbertConfig"
53
+
54
+
55
+ from ..deprecated._archive_maps import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
59
+ """Load tf checkpoints in a pytorch model."""
60
+ try:
61
+ import re
62
+
63
+ import numpy as np
64
+ import tensorflow as tf
65
+ except ImportError:
66
+ logger.error(
67
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
68
+ "https://www.tensorflow.org/install/ for installation instructions."
69
+ )
70
+ raise
71
+ tf_path = os.path.abspath(tf_checkpoint_path)
72
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
73
+ # Load weights from TF model
74
+ init_vars = tf.train.list_variables(tf_path)
75
+ names = []
76
+ arrays = []
77
+ for name, shape in init_vars:
78
+ logger.info(f"Loading TF weight {name} with shape {shape}")
79
+ array = tf.train.load_variable(tf_path, name)
80
+ names.append(name)
81
+ arrays.append(array)
82
+
83
+ for name, array in zip(names, arrays):
84
+ print(name)
85
+
86
+ for name, array in zip(names, arrays):
87
+ original_name = name
88
+
89
+ # If saved from the TF HUB module
90
+ name = name.replace("module/", "")
91
+
92
+ # Renaming and simplifying
93
+ name = name.replace("ffn_1", "ffn")
94
+ name = name.replace("bert/", "albert/")
95
+ name = name.replace("attention_1", "attention")
96
+ name = name.replace("transform/", "")
97
+ name = name.replace("LayerNorm_1", "full_layer_layer_norm")
98
+ name = name.replace("LayerNorm", "attention/LayerNorm")
99
+ name = name.replace("transformer/", "")
100
+
101
+ # The feed forward layer had an 'intermediate' step which has been abstracted away
102
+ name = name.replace("intermediate/dense/", "")
103
+ name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
104
+
105
+ # ALBERT attention was split between self and output which have been abstracted away
106
+ name = name.replace("/output/", "/")
107
+ name = name.replace("/self/", "/")
108
+
109
+ # The pooler is a linear layer
110
+ name = name.replace("pooler/dense", "pooler")
111
+
112
+ # The classifier was simplified to predictions from cls/predictions
113
+ name = name.replace("cls/predictions", "predictions")
114
+ name = name.replace("predictions/attention", "predictions")
115
+
116
+ # Naming was changed to be more explicit
117
+ name = name.replace("embeddings/attention", "embeddings")
118
+ name = name.replace("inner_group_", "albert_layers/")
119
+ name = name.replace("group_", "albert_layer_groups/")
120
+
121
+ # Classifier
122
+ if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
123
+ name = "classifier/" + name
124
+
125
+ # No ALBERT model currently handles the next sentence prediction task
126
+ if "seq_relationship" in name:
127
+ name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
128
+ name = name.replace("weights", "weight")
129
+
130
+ name = name.split("/")
131
+
132
+ # Ignore the gradients applied by the LAMB/ADAM optimizers.
133
+ if (
134
+ "adam_m" in name
135
+ or "adam_v" in name
136
+ or "AdamWeightDecayOptimizer" in name
137
+ or "AdamWeightDecayOptimizer_1" in name
138
+ or "global_step" in name
139
+ ):
140
+ logger.info(f"Skipping {'/'.join(name)}")
141
+ continue
142
+
143
+ pointer = model
144
+ for m_name in name:
145
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
146
+ scope_names = re.split(r"_(\d+)", m_name)
147
+ else:
148
+ scope_names = [m_name]
149
+
150
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
151
+ pointer = getattr(pointer, "weight")
152
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
153
+ pointer = getattr(pointer, "bias")
154
+ elif scope_names[0] == "output_weights":
155
+ pointer = getattr(pointer, "weight")
156
+ elif scope_names[0] == "squad":
157
+ pointer = getattr(pointer, "classifier")
158
+ else:
159
+ try:
160
+ pointer = getattr(pointer, scope_names[0])
161
+ except AttributeError:
162
+ logger.info(f"Skipping {'/'.join(name)}")
163
+ continue
164
+ if len(scope_names) >= 2:
165
+ num = int(scope_names[1])
166
+ pointer = pointer[num]
167
+
168
+ if m_name[-11:] == "_embeddings":
169
+ pointer = getattr(pointer, "weight")
170
+ elif m_name == "kernel":
171
+ array = np.transpose(array)
172
+ try:
173
+ if pointer.shape != array.shape:
174
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
175
+ except ValueError as e:
176
+ e.args += (pointer.shape, array.shape)
177
+ raise
178
+ print(f"Initialize PyTorch weight {name} from {original_name}")
179
+ pointer.data = torch.from_numpy(array)
180
+
181
+ return model
182
+
183
+
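Editor's note: to make the renaming chain in `load_tf_weights_in_albert` concrete, here is how one hypothetical TF-Hub-style variable name flows through the `.replace()` calls above (the input name is illustrative, not taken from a real checkpoint):

```python
# Illustrative trace of the renaming rules in load_tf_weights_in_albert.
tf_name = "module/bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/output/dense/kernel"
# module/ stripped, ffn_1 -> ffn, bert/ -> albert/, transformer/ stripped,
# ffn/intermediate/output/dense/ -> ffn_output/,
# inner_group_ -> albert_layers/, group_ -> albert_layer_groups/
# => "albert/encoder/albert_layer_groups/0/albert_layers/0/ffn_output/kernel"
# The trailing "kernel" then resolves to that layer's .weight, and the array is transposed.
```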
184
+ class AlbertEmbeddings(nn.Module):
185
+ """
186
+ Construct the embeddings from word, position and token_type embeddings.
187
+ """
188
+
189
+ def __init__(self, config: AlbertConfig):
190
+ super().__init__()
191
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
192
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
193
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
194
+
195
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
196
+ # any TensorFlow checkpoint file
197
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
198
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
199
+
200
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
201
+ self.register_buffer(
202
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
203
+ )
204
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
205
+ self.register_buffer(
206
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
207
+ )
208
+
209
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
210
+ def forward(
211
+ self,
212
+ input_ids: Optional[torch.LongTensor] = None,
213
+ token_type_ids: Optional[torch.LongTensor] = None,
214
+ position_ids: Optional[torch.LongTensor] = None,
215
+ inputs_embeds: Optional[torch.FloatTensor] = None,
216
+ past_key_values_length: int = 0,
217
+ ) -> torch.Tensor:
218
+ if input_ids is not None:
219
+ input_shape = input_ids.size()
220
+ else:
221
+ input_shape = inputs_embeds.size()[:-1]
222
+
223
+ seq_length = input_shape[1]
224
+
225
+ if position_ids is None:
226
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
227
+
228
+ # Set token_type_ids to the registered buffer defined in the constructor, where it is all zeros; this usually occurs
229
+ # when it is auto-generated. The registered buffer helps users trace the model without passing token_type_ids and solves
230
+ # issue #5664
231
+ if token_type_ids is None:
232
+ if hasattr(self, "token_type_ids"):
233
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
234
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
235
+ token_type_ids = buffered_token_type_ids_expanded
236
+ else:
237
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
238
+
239
+ if inputs_embeds is None:
240
+ inputs_embeds = self.word_embeddings(input_ids)
241
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
242
+
243
+ embeddings = inputs_embeds + token_type_embeddings
244
+ if self.position_embedding_type == "absolute":
245
+ position_embeddings = self.position_embeddings(position_ids)
246
+ embeddings += position_embeddings
247
+ embeddings = self.LayerNorm(embeddings)
248
+ embeddings = self.dropout(embeddings)
249
+ return embeddings
250
+
251
+
252
+ class AlbertAttention(nn.Module):
253
+ def __init__(self, config: AlbertConfig):
254
+ super().__init__()
255
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
256
+ raise ValueError(
257
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
258
+ f"heads ({config.num_attention_heads}"
259
+ )
260
+
261
+ self.num_attention_heads = config.num_attention_heads
262
+ self.hidden_size = config.hidden_size
263
+ self.attention_head_size = config.hidden_size // config.num_attention_heads
264
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
265
+
266
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
267
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
268
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
269
+
270
+ self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
271
+ self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
272
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
273
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
274
+ self.pruned_heads = set()
275
+
276
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
277
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
278
+ self.max_position_embeddings = config.max_position_embeddings
279
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
280
+
281
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
282
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
283
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
284
+ x = x.view(new_x_shape)
285
+ return x.permute(0, 2, 1, 3)
286
+
287
+ def prune_heads(self, heads: List[int]) -> None:
288
+ if len(heads) == 0:
289
+ return
290
+ heads, index = find_pruneable_heads_and_indices(
291
+ heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
292
+ )
293
+
294
+ # Prune linear layers
295
+ self.query = prune_linear_layer(self.query, index)
296
+ self.key = prune_linear_layer(self.key, index)
297
+ self.value = prune_linear_layer(self.value, index)
298
+ self.dense = prune_linear_layer(self.dense, index, dim=1)
299
+
300
+ # Update hyper params and store pruned heads
301
+ self.num_attention_heads = self.num_attention_heads - len(heads)
302
+ self.all_head_size = self.attention_head_size * self.num_attention_heads
303
+ self.pruned_heads = self.pruned_heads.union(heads)
304
+
305
+ def forward(
306
+ self,
307
+ hidden_states: torch.Tensor,
308
+ attention_mask: Optional[torch.FloatTensor] = None,
309
+ head_mask: Optional[torch.FloatTensor] = None,
310
+ output_attentions: bool = False,
311
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
312
+ mixed_query_layer = self.query(hidden_states)
313
+ mixed_key_layer = self.key(hidden_states)
314
+ mixed_value_layer = self.value(hidden_states)
315
+
316
+ query_layer = self.transpose_for_scores(mixed_query_layer)
317
+ key_layer = self.transpose_for_scores(mixed_key_layer)
318
+ value_layer = self.transpose_for_scores(mixed_value_layer)
319
+
320
+ # Take the dot product between "query" and "key" to get the raw attention scores.
321
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
322
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
323
+
324
+ if attention_mask is not None:
325
+ # Apply the attention mask (precomputed for all layers in the AlbertModel forward() function)
326
+ attention_scores = attention_scores + attention_mask
327
+
328
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
329
+ seq_length = hidden_states.size()[1]
330
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
331
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
332
+ distance = position_ids_l - position_ids_r
333
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
334
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
335
+
336
+ if self.position_embedding_type == "relative_key":
337
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
338
+ attention_scores = attention_scores + relative_position_scores
339
+ elif self.position_embedding_type == "relative_key_query":
340
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
341
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
342
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
343
+
344
+ # Normalize the attention scores to probabilities.
345
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
346
+
347
+ # This is actually dropping out entire tokens to attend to, which might
348
+ # seem a bit unusual, but is taken from the original Transformer paper.
349
+ attention_probs = self.attention_dropout(attention_probs)
350
+
351
+ # Mask heads if we want to
352
+ if head_mask is not None:
353
+ attention_probs = attention_probs * head_mask
354
+
355
+ context_layer = torch.matmul(attention_probs, value_layer)
356
+ context_layer = context_layer.transpose(2, 1).flatten(2)
357
+
358
+ projected_context_layer = self.dense(context_layer)
359
+ projected_context_layer_dropout = self.output_dropout(projected_context_layer)
360
+ layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
361
+ return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
362
+
363
+
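Editor's note: shape bookkeeping for the attention block above, with illustrative sizes (batch=2, seq_len=8, hidden_size=768, num_attention_heads=12, so attention_head_size=64):

```python
# hidden_states:              (2, 8, 768)
# query/key/value layers:     (2, 12, 8, 64)  after transpose_for_scores
# attention_scores:           (2, 12, 8, 8)   q @ k^T / sqrt(64), plus the additive attention_mask
# context_layer:              (2, 12, 8, 64)  softmax(scores) @ v
# transpose(2, 1).flatten(2): (2, 8, 768)
# returned hidden states:     (2, 8, 768)     LayerNorm(hidden_states + dropout(dense(context)))
```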
364
+ class AlbertLayer(nn.Module):
365
+ def __init__(self, config: AlbertConfig):
366
+ super().__init__()
367
+
368
+ self.config = config
369
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
370
+ self.seq_len_dim = 1
371
+ self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
372
+ self.attention = AlbertAttention(config)
373
+ self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
374
+ self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
375
+ self.activation = ACT2FN[config.hidden_act]
376
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
377
+
378
+ def forward(
379
+ self,
380
+ hidden_states: torch.Tensor,
381
+ attention_mask: Optional[torch.FloatTensor] = None,
382
+ head_mask: Optional[torch.FloatTensor] = None,
383
+ output_attentions: bool = False,
384
+ output_hidden_states: bool = False,
385
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
386
+ attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
387
+
388
+ ffn_output = apply_chunking_to_forward(
389
+ self.ff_chunk,
390
+ self.chunk_size_feed_forward,
391
+ self.seq_len_dim,
392
+ attention_output[0],
393
+ )
394
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
395
+
396
+ return (hidden_states,) + attention_output[1:] # add attentions if we output them
397
+
398
+ def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
399
+ ffn_output = self.ffn(attention_output)
400
+ ffn_output = self.activation(ffn_output)
401
+ ffn_output = self.ffn_output(ffn_output)
402
+ return ffn_output
403
+
404
+
405
+ class AlbertLayerGroup(nn.Module):
406
+ def __init__(self, config: AlbertConfig):
407
+ super().__init__()
408
+
409
+ self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
410
+
411
+ def forward(
412
+ self,
413
+ hidden_states: torch.Tensor,
414
+ attention_mask: Optional[torch.FloatTensor] = None,
415
+ head_mask: Optional[torch.FloatTensor] = None,
416
+ output_attentions: bool = False,
417
+ output_hidden_states: bool = False,
418
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
419
+ layer_hidden_states = ()
420
+ layer_attentions = ()
421
+
422
+ for layer_index, albert_layer in enumerate(self.albert_layers):
423
+ layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
424
+ hidden_states = layer_output[0]
425
+
426
+ if output_attentions:
427
+ layer_attentions = layer_attentions + (layer_output[1],)
428
+
429
+ if output_hidden_states:
430
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
431
+
432
+ outputs = (hidden_states,)
433
+ if output_hidden_states:
434
+ outputs = outputs + (layer_hidden_states,)
435
+ if output_attentions:
436
+ outputs = outputs + (layer_attentions,)
437
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
438
+
439
+
440
+ class AlbertTransformer(nn.Module):
441
+ def __init__(self, config: AlbertConfig):
442
+ super().__init__()
443
+
444
+ self.config = config
445
+ self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
446
+ self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
447
+
448
+ def forward(
449
+ self,
450
+ hidden_states: torch.Tensor,
451
+ attention_mask: Optional[torch.FloatTensor] = None,
452
+ head_mask: Optional[torch.FloatTensor] = None,
453
+ output_attentions: bool = False,
454
+ output_hidden_states: bool = False,
455
+ return_dict: bool = True,
456
+ ) -> Union[BaseModelOutput, Tuple]:
457
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
458
+
459
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
460
+ all_attentions = () if output_attentions else None
461
+
462
+ head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
463
+
464
+ for i in range(self.config.num_hidden_layers):
465
+ # Number of layers in a hidden group
466
+ layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
467
+
468
+ # Index of the hidden group
469
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
470
+
471
+ layer_group_output = self.albert_layer_groups[group_idx](
472
+ hidden_states,
473
+ attention_mask,
474
+ head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
475
+ output_attentions,
476
+ output_hidden_states,
477
+ )
478
+ hidden_states = layer_group_output[0]
479
+
480
+ if output_attentions:
481
+ all_attentions = all_attentions + layer_group_output[-1]
482
+
483
+ if output_hidden_states:
484
+ all_hidden_states = all_hidden_states + (hidden_states,)
485
+
486
+ if not return_dict:
487
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
488
+ return BaseModelOutput(
489
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
490
+ )
491
+
492
+
493
+ class AlbertPreTrainedModel(PreTrainedModel):
494
+ """
495
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
496
+ models.
497
+ """
498
+
499
+ config_class = AlbertConfig
500
+ load_tf_weights = load_tf_weights_in_albert
501
+ base_model_prefix = "albert"
502
+
503
+ def _init_weights(self, module):
504
+ """Initialize the weights."""
505
+ if isinstance(module, nn.Linear):
506
+ # Slightly different from the TF version which uses truncated_normal for initialization
507
+ # cf https://github.com/pytorch/pytorch/pull/5617
508
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
509
+ if module.bias is not None:
510
+ module.bias.data.zero_()
511
+ elif isinstance(module, nn.Embedding):
512
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
513
+ if module.padding_idx is not None:
514
+ module.weight.data[module.padding_idx].zero_()
515
+ elif isinstance(module, nn.LayerNorm):
516
+ module.bias.data.zero_()
517
+ module.weight.data.fill_(1.0)
518
+
519
+
520
+ @dataclass
521
+ class AlbertForPreTrainingOutput(ModelOutput):
522
+ """
523
+ Output type of [`AlbertForPreTraining`].
524
+
525
+ Args:
526
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
527
+ Total loss as the sum of the masked language modeling loss and the sentence order prediction
528
+ (classification) loss.
529
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
530
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
531
+ sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
532
+ Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation
533
+ before SoftMax).
534
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
535
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
536
+ shape `(batch_size, sequence_length, hidden_size)`.
537
+
538
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
539
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
540
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
541
+ sequence_length)`.
542
+
543
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
544
+ heads.
545
+ """
546
+
547
+ loss: Optional[torch.FloatTensor] = None
548
+ prediction_logits: torch.FloatTensor = None
549
+ sop_logits: torch.FloatTensor = None
550
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
551
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
552
+
553
+
554
+ ALBERT_START_DOCSTRING = r"""
555
+
556
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
557
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
558
+ etc.)
559
+
560
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
561
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
562
+ and behavior.
563
+
564
+ Args:
565
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
566
+ Initializing with a config file does not load the weights associated with the model, only the
567
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
568
+ """
569
+
570
+ ALBERT_INPUTS_DOCSTRING = r"""
571
+ Args:
572
+ input_ids (`torch.LongTensor` of shape `({0})`):
573
+ Indices of input sequence tokens in the vocabulary.
574
+
575
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
576
+ [`PreTrainedTokenizer.encode`] for details.
577
+
578
+ [What are input IDs?](../glossary#input-ids)
579
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
580
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
581
+
582
+ - 1 for tokens that are **not masked**,
583
+ - 0 for tokens that are **masked**.
584
+
585
+ [What are attention masks?](../glossary#attention-mask)
586
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
587
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
588
+ 1]`:
589
+
590
+ - 0 corresponds to a *sentence A* token,
591
+ - 1 corresponds to a *sentence B* token.
592
+
593
+ [What are token type IDs?](../glossary#token-type-ids)
594
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
595
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
596
+ config.max_position_embeddings - 1]`.
597
+
598
+ [What are position IDs?](../glossary#position-ids)
599
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
600
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
601
+
602
+ - 1 indicates the head is **not masked**,
603
+ - 0 indicates the head is **masked**.
604
+
605
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
606
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
607
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
608
+ model's internal embedding lookup matrix.
609
+ output_attentions (`bool`, *optional*):
610
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
611
+ tensors for more detail.
612
+ output_hidden_states (`bool`, *optional*):
613
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
614
+ more detail.
615
+ return_dict (`bool`, *optional*):
616
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
617
+ """
618
+
619
+
620
+ @add_start_docstrings(
621
+ "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
622
+ ALBERT_START_DOCSTRING,
623
+ )
624
+ class AlbertModel(AlbertPreTrainedModel):
625
+ config_class = AlbertConfig
626
+ base_model_prefix = "albert"
627
+
628
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
629
+ super().__init__(config)
630
+
631
+ self.config = config
632
+ self.embeddings = AlbertEmbeddings(config)
633
+ self.encoder = AlbertTransformer(config)
634
+ if add_pooling_layer:
635
+ self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
636
+ self.pooler_activation = nn.Tanh()
637
+ else:
638
+ self.pooler = None
639
+ self.pooler_activation = None
640
+
641
+ # Initialize weights and apply final processing
642
+ self.post_init()
643
+
644
+ def get_input_embeddings(self) -> nn.Embedding:
645
+ return self.embeddings.word_embeddings
646
+
647
+ def set_input_embeddings(self, value: nn.Embedding) -> None:
648
+ self.embeddings.word_embeddings = value
649
+
650
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
651
+ """
652
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT has
653
+ a different architecture in that its layers are shared across groups, which in turn have inner groups. If an ALBERT
654
+ model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.
655
+
656
+ These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
657
+ while [2,3] correspond to the two inner groups of the second hidden group.
658
+
659
+ Any layer with an index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
660
+ information about head pruning. A worked example of this indexing appears after this method.
661
+ """
662
+ for layer, heads in heads_to_prune.items():
663
+ group_idx = int(layer / self.config.inner_group_num)
664
+ inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
665
+ self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
666
+
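Editor's note: a worked example of the layer-to-group indexing used by `_prune_heads` above and by `AlbertTransformer.forward`, under the docstring's assumption of 12 hidden layers, 2 hidden groups and 2 inner groups:

```python
# _prune_heads: flattened "layer" index -> (group_idx, inner_group_idx)
inner_group_num = 2
print([(layer // inner_group_num, layer % inner_group_num) for layer in range(4)])
# [(0, 0), (0, 1), (1, 0), (1, 1)]

# AlbertTransformer.forward: encoder step i -> hidden group run at that step
num_hidden_layers, num_hidden_groups = 12, 2
print([int(i / (num_hidden_layers / num_hidden_groups)) for i in range(num_hidden_layers)])
# [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
```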
667
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
668
+ @add_code_sample_docstrings(
669
+ checkpoint=_CHECKPOINT_FOR_DOC,
670
+ output_type=BaseModelOutputWithPooling,
671
+ config_class=_CONFIG_FOR_DOC,
672
+ )
673
+ def forward(
674
+ self,
675
+ input_ids: Optional[torch.LongTensor] = None,
676
+ attention_mask: Optional[torch.FloatTensor] = None,
677
+ token_type_ids: Optional[torch.LongTensor] = None,
678
+ position_ids: Optional[torch.LongTensor] = None,
679
+ head_mask: Optional[torch.FloatTensor] = None,
680
+ inputs_embeds: Optional[torch.FloatTensor] = None,
681
+ output_attentions: Optional[bool] = None,
682
+ output_hidden_states: Optional[bool] = None,
683
+ return_dict: Optional[bool] = None,
684
+ ) -> Union[BaseModelOutputWithPooling, Tuple]:
685
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
686
+ output_hidden_states = (
687
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
688
+ )
689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
690
+
691
+ if input_ids is not None and inputs_embeds is not None:
692
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
693
+ elif input_ids is not None:
694
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
695
+ input_shape = input_ids.size()
696
+ elif inputs_embeds is not None:
697
+ input_shape = inputs_embeds.size()[:-1]
698
+ else:
699
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
700
+
701
+ batch_size, seq_length = input_shape
702
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
703
+
704
+ if attention_mask is None:
705
+ attention_mask = torch.ones(input_shape, device=device)
706
+ if token_type_ids is None:
707
+ if hasattr(self.embeddings, "token_type_ids"):
708
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
709
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
710
+ token_type_ids = buffered_token_type_ids_expanded
711
+ else:
712
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
713
+
714
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
715
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
716
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
717
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
718
+
719
+ embedding_output = self.embeddings(
720
+ input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
721
+ )
722
+ encoder_outputs = self.encoder(
723
+ embedding_output,
724
+ extended_attention_mask,
725
+ head_mask=head_mask,
726
+ output_attentions=output_attentions,
727
+ output_hidden_states=output_hidden_states,
728
+ return_dict=return_dict,
729
+ )
730
+
731
+ sequence_output = encoder_outputs[0]
732
+
733
+ pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
734
+
735
+ if not return_dict:
736
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
737
+
738
+ return BaseModelOutputWithPooling(
739
+ last_hidden_state=sequence_output,
740
+ pooler_output=pooled_output,
741
+ hidden_states=encoder_outputs.hidden_states,
742
+ attentions=encoder_outputs.attentions,
743
+ )
744
+
745
+
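A minimal usage sketch for the `AlbertModel` class above (not part of the module being added; it assumes the `albert/albert-base-v2` checkpoint already referenced by this file's doctests is reachable) and an illustration of the flat-index arithmetic that `_prune_heads` relies on:

```python
# Sketch only: exercising AlbertModel and the _prune_heads index arithmetic.
# Assumes the albert/albert-base-v2 checkpoint used in this file's doctests.
import torch
from transformers import AlbertModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertModel.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("ALBERT shares parameters across layers.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)

# `_prune_heads` flattens (hidden_group, inner_group) into a single layer index:
cfg = model.config
for layer in range(cfg.num_hidden_groups * cfg.inner_group_num):
    group_idx = layer // cfg.inner_group_num
    inner_group_idx = layer - group_idx * cfg.inner_group_num
    print(f"flat index {layer} -> group {group_idx}, inner layer {inner_group_idx}")
```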
746
+ @add_start_docstrings(
747
+ """
748
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
749
+ `sentence order prediction (classification)` head.
750
+ """,
751
+ ALBERT_START_DOCSTRING,
752
+ )
753
+ class AlbertForPreTraining(AlbertPreTrainedModel):
754
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
755
+
756
+ def __init__(self, config: AlbertConfig):
757
+ super().__init__(config)
758
+
759
+ self.albert = AlbertModel(config)
760
+ self.predictions = AlbertMLMHead(config)
761
+ self.sop_classifier = AlbertSOPHead(config)
762
+
763
+ # Initialize weights and apply final processing
764
+ self.post_init()
765
+
766
+ def get_output_embeddings(self) -> nn.Linear:
767
+ return self.predictions.decoder
768
+
769
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
770
+ self.predictions.decoder = new_embeddings
771
+
772
+ def get_input_embeddings(self) -> nn.Embedding:
773
+ return self.albert.embeddings.word_embeddings
774
+
775
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
776
+ @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
777
+ def forward(
778
+ self,
779
+ input_ids: Optional[torch.LongTensor] = None,
780
+ attention_mask: Optional[torch.FloatTensor] = None,
781
+ token_type_ids: Optional[torch.LongTensor] = None,
782
+ position_ids: Optional[torch.LongTensor] = None,
783
+ head_mask: Optional[torch.FloatTensor] = None,
784
+ inputs_embeds: Optional[torch.FloatTensor] = None,
785
+ labels: Optional[torch.LongTensor] = None,
786
+ sentence_order_label: Optional[torch.LongTensor] = None,
787
+ output_attentions: Optional[bool] = None,
788
+ output_hidden_states: Optional[bool] = None,
789
+ return_dict: Optional[bool] = None,
790
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
791
+ r"""
792
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
793
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
794
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
795
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
796
+ sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
797
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
798
+ (see `input_ids` docstring). Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then
799
+ sequence B), `1` indicates switched order (sequence B, then sequence A).
800
+
801
+ Returns:
802
+
803
+ Example:
804
+
805
+ ```python
806
+ >>> from transformers import AutoTokenizer, AlbertForPreTraining
807
+ >>> import torch
808
+
809
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
810
+ >>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")
811
+
812
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
813
+ >>> # Batch size 1
814
+ >>> outputs = model(input_ids)
815
+
816
+ >>> prediction_logits = outputs.prediction_logits
817
+ >>> sop_logits = outputs.sop_logits
818
+ ```"""
819
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
820
+
821
+ outputs = self.albert(
822
+ input_ids,
823
+ attention_mask=attention_mask,
824
+ token_type_ids=token_type_ids,
825
+ position_ids=position_ids,
826
+ head_mask=head_mask,
827
+ inputs_embeds=inputs_embeds,
828
+ output_attentions=output_attentions,
829
+ output_hidden_states=output_hidden_states,
830
+ return_dict=return_dict,
831
+ )
832
+
833
+ sequence_output, pooled_output = outputs[:2]
834
+
835
+ prediction_scores = self.predictions(sequence_output)
836
+ sop_scores = self.sop_classifier(pooled_output)
837
+
838
+ total_loss = None
839
+ if labels is not None and sentence_order_label is not None:
840
+ loss_fct = CrossEntropyLoss()
841
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
842
+ sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
843
+ total_loss = masked_lm_loss + sentence_order_loss
844
+
845
+ if not return_dict:
846
+ output = (prediction_scores, sop_scores) + outputs[2:]
847
+ return ((total_loss,) + output) if total_loss is not None else output
848
+
849
+ return AlbertForPreTrainingOutput(
850
+ loss=total_loss,
851
+ prediction_logits=prediction_scores,
852
+ sop_logits=sop_scores,
853
+ hidden_states=outputs.hidden_states,
854
+ attentions=outputs.attentions,
855
+ )
856
+
857
+
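A short sketch (not part of the module being added) of how `AlbertForPreTraining.forward` above combines the two losses when both label tensors are supplied; the labels here are placeholders that only exercise the shapes, whereas real pretraining would mask tokens and set unmasked positions to `-100`:

```python
# Sketch only: passing both label tensors so forward returns
# masked_lm_loss + sentence_order_loss. Labels are placeholders.
import torch
from transformers import AlbertForPreTraining, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("Sentence A.", "Sentence B.", return_tensors="pt")
mlm_labels = inputs.input_ids.clone()  # placeholder MLM labels
sop_labels = torch.tensor([0])         # 0 = original order, 1 = swapped

outputs = model(**inputs, labels=mlm_labels, sentence_order_label=sop_labels)
print(outputs.loss)                     # combined MLM + SOP loss
print(outputs.prediction_logits.shape)  # (1, sequence_length, vocab_size)
print(outputs.sop_logits.shape)         # (1, 2)
```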
858
+ class AlbertMLMHead(nn.Module):
859
+ def __init__(self, config: AlbertConfig):
860
+ super().__init__()
861
+
862
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
863
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
864
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
865
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
866
+ self.activation = ACT2FN[config.hidden_act]
867
+ self.decoder.bias = self.bias
868
+
869
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
870
+ hidden_states = self.dense(hidden_states)
871
+ hidden_states = self.activation(hidden_states)
872
+ hidden_states = self.LayerNorm(hidden_states)
873
+ hidden_states = self.decoder(hidden_states)
874
+
875
+ prediction_scores = hidden_states
876
+
877
+ return prediction_scores
878
+
879
+ def _tie_weights(self) -> None:
880
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
881
+ self.bias = self.decoder.bias
882
+
883
+
884
+ class AlbertSOPHead(nn.Module):
885
+ def __init__(self, config: AlbertConfig):
886
+ super().__init__()
887
+
888
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
889
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
890
+
891
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
892
+ dropout_pooled_output = self.dropout(pooled_output)
893
+ logits = self.classifier(dropout_pooled_output)
894
+ return logits
895
+
896
+
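A small shape sketch (not part of the module being added) for the two heads defined above: `AlbertMLMHead` projects hidden_size to embedding_size to vocab_size, mirroring ALBERT's factorized embedding parameterization, and `AlbertSOPHead` maps the pooled output to `num_labels` logits (2 by default). The classes are imported from the internal module path purely for illustration, and the config values are arbitrary:

```python
# Sketch only: feeding random tensors through the heads to show the shapes.
import torch
from transformers import AlbertConfig
from transformers.models.albert.modeling_albert import AlbertMLMHead, AlbertSOPHead

config = AlbertConfig(hidden_size=768, embedding_size=128, vocab_size=30000)
mlm_head = AlbertMLMHead(config)
sop_head = AlbertSOPHead(config)

hidden_states = torch.randn(2, 16, config.hidden_size)  # (batch, seq_len, hidden_size)
pooled_output = torch.randn(2, config.hidden_size)      # (batch, hidden_size)

print(mlm_head(hidden_states).shape)  # torch.Size([2, 16, 30000])
print(sop_head(pooled_output).shape)  # torch.Size([2, 2])
```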
897
+ @add_start_docstrings(
898
+ "Albert Model with a `language modeling` head on top.",
899
+ ALBERT_START_DOCSTRING,
900
+ )
901
+ class AlbertForMaskedLM(AlbertPreTrainedModel):
902
+ _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
903
+
904
+ def __init__(self, config):
905
+ super().__init__(config)
906
+
907
+ self.albert = AlbertModel(config, add_pooling_layer=False)
908
+ self.predictions = AlbertMLMHead(config)
909
+
910
+ # Initialize weights and apply final processing
911
+ self.post_init()
912
+
913
+ def get_output_embeddings(self) -> nn.Linear:
914
+ return self.predictions.decoder
915
+
916
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
917
+ self.predictions.decoder = new_embeddings
918
+
919
+ def get_input_embeddings(self) -> nn.Embedding:
920
+ return self.albert.embeddings.word_embeddings
921
+
922
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
923
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
924
+ def forward(
925
+ self,
926
+ input_ids: Optional[torch.LongTensor] = None,
927
+ attention_mask: Optional[torch.FloatTensor] = None,
928
+ token_type_ids: Optional[torch.LongTensor] = None,
929
+ position_ids: Optional[torch.LongTensor] = None,
930
+ head_mask: Optional[torch.FloatTensor] = None,
931
+ inputs_embeds: Optional[torch.FloatTensor] = None,
932
+ labels: Optional[torch.LongTensor] = None,
933
+ output_attentions: Optional[bool] = None,
934
+ output_hidden_states: Optional[bool] = None,
935
+ return_dict: Optional[bool] = None,
936
+ ) -> Union[MaskedLMOutput, Tuple]:
937
+ r"""
938
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
939
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
940
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
941
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
942
+
943
+ Returns:
944
+
945
+ Example:
946
+
947
+ ```python
948
+ >>> import torch
949
+ >>> from transformers import AutoTokenizer, AlbertForMaskedLM
950
+
951
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
952
+ >>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
953
+
954
+ >>> # add mask_token
955
+ >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt")
956
+ >>> with torch.no_grad():
957
+ ... logits = model(**inputs).logits
958
+
959
+ >>> # retrieve index of [MASK]
960
+ >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
961
+ >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
962
+ >>> tokenizer.decode(predicted_token_id)
963
+ 'france'
964
+ ```
965
+
966
+ ```python
967
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
968
+ >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
969
+ >>> outputs = model(**inputs, labels=labels)
970
+ >>> round(outputs.loss.item(), 2)
971
+ 0.81
972
+ ```
973
+ """
974
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
975
+
976
+ outputs = self.albert(
977
+ input_ids=input_ids,
978
+ attention_mask=attention_mask,
979
+ token_type_ids=token_type_ids,
980
+ position_ids=position_ids,
981
+ head_mask=head_mask,
982
+ inputs_embeds=inputs_embeds,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+ sequence_outputs = outputs[0]
988
+
989
+ prediction_scores = self.predictions(sequence_outputs)
990
+
991
+ masked_lm_loss = None
992
+ if labels is not None:
993
+ loss_fct = CrossEntropyLoss()
994
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
995
+
996
+ if not return_dict:
997
+ output = (prediction_scores,) + outputs[2:]
998
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
999
+
1000
+ return MaskedLMOutput(
1001
+ loss=masked_lm_loss,
1002
+ logits=prediction_scores,
1003
+ hidden_states=outputs.hidden_states,
1004
+ attentions=outputs.attentions,
1005
+ )
1006
+
1007
+
1008
+ @add_start_docstrings(
1009
+ """
1010
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1011
+ output) e.g. for GLUE tasks.
1012
+ """,
1013
+ ALBERT_START_DOCSTRING,
1014
+ )
1015
+ class AlbertForSequenceClassification(AlbertPreTrainedModel):
1016
+ def __init__(self, config: AlbertConfig):
1017
+ super().__init__(config)
1018
+ self.num_labels = config.num_labels
1019
+ self.config = config
1020
+
1021
+ self.albert = AlbertModel(config)
1022
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1023
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
1024
+
1025
+ # Initialize weights and apply final processing
1026
+ self.post_init()
1027
+
1028
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1029
+ @add_code_sample_docstrings(
1030
+ checkpoint="textattack/albert-base-v2-imdb",
1031
+ output_type=SequenceClassifierOutput,
1032
+ config_class=_CONFIG_FOR_DOC,
1033
+ expected_output="'LABEL_1'",
1034
+ expected_loss=0.12,
1035
+ )
1036
+ def forward(
1037
+ self,
1038
+ input_ids: Optional[torch.LongTensor] = None,
1039
+ attention_mask: Optional[torch.FloatTensor] = None,
1040
+ token_type_ids: Optional[torch.LongTensor] = None,
1041
+ position_ids: Optional[torch.LongTensor] = None,
1042
+ head_mask: Optional[torch.FloatTensor] = None,
1043
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1044
+ labels: Optional[torch.LongTensor] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ ) -> Union[SequenceClassifierOutput, Tuple]:
1049
+ r"""
1050
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1051
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1052
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1053
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1054
+ """
1055
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1056
+
1057
+ outputs = self.albert(
1058
+ input_ids=input_ids,
1059
+ attention_mask=attention_mask,
1060
+ token_type_ids=token_type_ids,
1061
+ position_ids=position_ids,
1062
+ head_mask=head_mask,
1063
+ inputs_embeds=inputs_embeds,
1064
+ output_attentions=output_attentions,
1065
+ output_hidden_states=output_hidden_states,
1066
+ return_dict=return_dict,
1067
+ )
1068
+
1069
+ pooled_output = outputs[1]
1070
+
1071
+ pooled_output = self.dropout(pooled_output)
1072
+ logits = self.classifier(pooled_output)
1073
+
1074
+ loss = None
1075
+ if labels is not None:
1076
+ if self.config.problem_type is None:
1077
+ if self.num_labels == 1:
1078
+ self.config.problem_type = "regression"
1079
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1080
+ self.config.problem_type = "single_label_classification"
1081
+ else:
1082
+ self.config.problem_type = "multi_label_classification"
1083
+
1084
+ if self.config.problem_type == "regression":
1085
+ loss_fct = MSELoss()
1086
+ if self.num_labels == 1:
1087
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1088
+ else:
1089
+ loss = loss_fct(logits, labels)
1090
+ elif self.config.problem_type == "single_label_classification":
1091
+ loss_fct = CrossEntropyLoss()
1092
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1093
+ elif self.config.problem_type == "multi_label_classification":
1094
+ loss_fct = BCEWithLogitsLoss()
1095
+ loss = loss_fct(logits, labels)
1096
+
1097
+ if not return_dict:
1098
+ output = (logits,) + outputs[2:]
1099
+ return ((loss,) + output) if loss is not None else output
1100
+
1101
+ return SequenceClassifierOutput(
1102
+ loss=loss,
1103
+ logits=logits,
1104
+ hidden_states=outputs.hidden_states,
1105
+ attentions=outputs.attentions,
1106
+ )
1107
+
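A sketch (not part of the module being added) of how `problem_type` steers the loss branch in the `AlbertForSequenceClassification.forward` above. The tiny randomly initialized config is an arbitrary choice so that no checkpoint download is needed; the printed loss values are meaningless:

```python
# Sketch only: the three problem_type branches of the classification loss.
import torch
from transformers import AlbertConfig, AlbertForSequenceClassification

tiny = dict(vocab_size=100, embedding_size=32, hidden_size=64, num_hidden_layers=2,
            num_attention_heads=4, intermediate_size=128)
input_ids = torch.randint(1, 100, (2, 8))

# num_labels > 1 with integer labels -> single_label_classification (CrossEntropyLoss)
model = AlbertForSequenceClassification(AlbertConfig(num_labels=3, **tiny))
print(model(input_ids, labels=torch.tensor([0, 2])).loss)

# num_labels == 1 -> regression (MSELoss)
model = AlbertForSequenceClassification(AlbertConfig(num_labels=1, **tiny))
print(model(input_ids, labels=torch.tensor([0.5, 1.5])).loss)

# explicit multi_label_classification with multi-hot float labels -> BCEWithLogitsLoss
model = AlbertForSequenceClassification(
    AlbertConfig(num_labels=3, problem_type="multi_label_classification", **tiny)
)
print(model(input_ids, labels=torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])).loss)
```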
1108
+
1109
+ @add_start_docstrings(
1110
+ """
1111
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1112
+ Named-Entity-Recognition (NER) tasks.
1113
+ """,
1114
+ ALBERT_START_DOCSTRING,
1115
+ )
1116
+ class AlbertForTokenClassification(AlbertPreTrainedModel):
1117
+ def __init__(self, config: AlbertConfig):
1118
+ super().__init__(config)
1119
+ self.num_labels = config.num_labels
1120
+
1121
+ self.albert = AlbertModel(config, add_pooling_layer=False)
1122
+ classifier_dropout_prob = (
1123
+ config.classifier_dropout_prob
1124
+ if config.classifier_dropout_prob is not None
1125
+ else config.hidden_dropout_prob
1126
+ )
1127
+ self.dropout = nn.Dropout(classifier_dropout_prob)
1128
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
1129
+
1130
+ # Initialize weights and apply final processing
1131
+ self.post_init()
1132
+
1133
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1134
+ @add_code_sample_docstrings(
1135
+ checkpoint=_CHECKPOINT_FOR_DOC,
1136
+ output_type=TokenClassifierOutput,
1137
+ config_class=_CONFIG_FOR_DOC,
1138
+ )
1139
+ def forward(
1140
+ self,
1141
+ input_ids: Optional[torch.LongTensor] = None,
1142
+ attention_mask: Optional[torch.FloatTensor] = None,
1143
+ token_type_ids: Optional[torch.LongTensor] = None,
1144
+ position_ids: Optional[torch.LongTensor] = None,
1145
+ head_mask: Optional[torch.FloatTensor] = None,
1146
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1147
+ labels: Optional[torch.LongTensor] = None,
1148
+ output_attentions: Optional[bool] = None,
1149
+ output_hidden_states: Optional[bool] = None,
1150
+ return_dict: Optional[bool] = None,
1151
+ ) -> Union[TokenClassifierOutput, Tuple]:
1152
+ r"""
1153
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1154
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1155
+ """
1156
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1157
+
1158
+ outputs = self.albert(
1159
+ input_ids,
1160
+ attention_mask=attention_mask,
1161
+ token_type_ids=token_type_ids,
1162
+ position_ids=position_ids,
1163
+ head_mask=head_mask,
1164
+ inputs_embeds=inputs_embeds,
1165
+ output_attentions=output_attentions,
1166
+ output_hidden_states=output_hidden_states,
1167
+ return_dict=return_dict,
1168
+ )
1169
+
1170
+ sequence_output = outputs[0]
1171
+
1172
+ sequence_output = self.dropout(sequence_output)
1173
+ logits = self.classifier(sequence_output)
1174
+
1175
+ loss = None
1176
+ if labels is not None:
1177
+ loss_fct = CrossEntropyLoss()
1178
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1179
+
1180
+ if not return_dict:
1181
+ output = (logits,) + outputs[2:]
1182
+ return ((loss,) + output) if loss is not None else output
1183
+
1184
+ return TokenClassifierOutput(
1185
+ loss=loss,
1186
+ logits=logits,
1187
+ hidden_states=outputs.hidden_states,
1188
+ attentions=outputs.attentions,
1189
+ )
1190
+
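A sketch (not part of the module being added) of the per-token classification head above, including the usual `-100` convention for positions that should not contribute to the loss; the tiny config is arbitrary and randomly initialized:

```python
# Sketch only: per-token logits and the -100 ignore convention.
import torch
from transformers import AlbertConfig, AlbertForTokenClassification

config = AlbertConfig(vocab_size=100, embedding_size=32, hidden_size=64,
                      num_hidden_layers=2, num_attention_heads=4,
                      intermediate_size=128, num_labels=5)
model = AlbertForTokenClassification(config)

input_ids = torch.randint(1, 100, (1, 6))
labels = torch.tensor([[-100, 1, 2, 0, 0, -100]])  # -100 = ignore (e.g. special tokens)

outputs = model(input_ids, labels=labels)
print(outputs.logits.shape)  # torch.Size([1, 6, 5])
print(outputs.loss)
```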
1191
+
1192
+ @add_start_docstrings(
1193
+ """
1194
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1195
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1196
+ """,
1197
+ ALBERT_START_DOCSTRING,
1198
+ )
1199
+ class AlbertForQuestionAnswering(AlbertPreTrainedModel):
1200
+ def __init__(self, config: AlbertConfig):
1201
+ super().__init__(config)
1202
+ self.num_labels = config.num_labels
1203
+
1204
+ self.albert = AlbertModel(config, add_pooling_layer=False)
1205
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1206
+
1207
+ # Initialize weights and apply final processing
1208
+ self.post_init()
1209
+
1210
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1211
+ @add_code_sample_docstrings(
1212
+ checkpoint="twmkn9/albert-base-v2-squad2",
1213
+ output_type=QuestionAnsweringModelOutput,
1214
+ config_class=_CONFIG_FOR_DOC,
1215
+ qa_target_start_index=12,
1216
+ qa_target_end_index=13,
1217
+ expected_output="'a nice puppet'",
1218
+ expected_loss=7.36,
1219
+ )
1220
+ def forward(
1221
+ self,
1222
+ input_ids: Optional[torch.LongTensor] = None,
1223
+ attention_mask: Optional[torch.FloatTensor] = None,
1224
+ token_type_ids: Optional[torch.LongTensor] = None,
1225
+ position_ids: Optional[torch.LongTensor] = None,
1226
+ head_mask: Optional[torch.FloatTensor] = None,
1227
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1228
+ start_positions: Optional[torch.LongTensor] = None,
1229
+ end_positions: Optional[torch.LongTensor] = None,
1230
+ output_attentions: Optional[bool] = None,
1231
+ output_hidden_states: Optional[bool] = None,
1232
+ return_dict: Optional[bool] = None,
1233
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
1234
+ r"""
1235
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1236
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1237
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1238
+ are not taken into account for computing the loss.
1239
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1240
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1241
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1242
+ are not taken into account for computing the loss.
1243
+ """
1244
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1245
+
1246
+ outputs = self.albert(
1247
+ input_ids=input_ids,
1248
+ attention_mask=attention_mask,
1249
+ token_type_ids=token_type_ids,
1250
+ position_ids=position_ids,
1251
+ head_mask=head_mask,
1252
+ inputs_embeds=inputs_embeds,
1253
+ output_attentions=output_attentions,
1254
+ output_hidden_states=output_hidden_states,
1255
+ return_dict=return_dict,
1256
+ )
1257
+
1258
+ sequence_output = outputs[0]
1259
+
1260
+ logits: torch.Tensor = self.qa_outputs(sequence_output)
1261
+ start_logits, end_logits = logits.split(1, dim=-1)
1262
+ start_logits = start_logits.squeeze(-1).contiguous()
1263
+ end_logits = end_logits.squeeze(-1).contiguous()
1264
+
1265
+ total_loss = None
1266
+ if start_positions is not None and end_positions is not None:
1267
+ # If we are on multi-GPU, splitting can add an extra dimension; squeeze it
1268
+ if len(start_positions.size()) > 1:
1269
+ start_positions = start_positions.squeeze(-1)
1270
+ if len(end_positions.size()) > 1:
1271
+ end_positions = end_positions.squeeze(-1)
1272
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1273
+ ignored_index = start_logits.size(1)
1274
+ start_positions = start_positions.clamp(0, ignored_index)
1275
+ end_positions = end_positions.clamp(0, ignored_index)
1276
+
1277
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1278
+ start_loss = loss_fct(start_logits, start_positions)
1279
+ end_loss = loss_fct(end_logits, end_positions)
1280
+ total_loss = (start_loss + end_loss) / 2
1281
+
1282
+ if not return_dict:
1283
+ output = (start_logits, end_logits) + outputs[2:]
1284
+ return ((total_loss,) + output) if total_loss is not None else output
1285
+
1286
+ return QuestionAnsweringModelOutput(
1287
+ loss=total_loss,
1288
+ start_logits=start_logits,
1289
+ end_logits=end_logits,
1290
+ hidden_states=outputs.hidden_states,
1291
+ attentions=outputs.attentions,
1292
+ )
1293
+
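A sketch (not part of the module being added) of decoding an answer span from the start/end logits produced by the question-answering head above. It assumes the `twmkn9/albert-base-v2-squad2` checkpoint named in the code-sample docstring; the question and context strings are made up, and the greedy argmax decoding ignores the start-before-end check a real pipeline would add:

```python
# Sketch only: greedy span decoding from start/end logits.
import torch
from transformers import AlbertForQuestionAnswering, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("twmkn9/albert-base-v2-squad2")
model = AlbertForQuestionAnswering.from_pretrained("twmkn9/albert-base-v2-squad2")

question = "What does ALBERT share across layers?"
context = "ALBERT shares its parameters across layers to reduce memory consumption."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax().item()
end = outputs.end_logits.argmax().item()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```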
1294
+
1295
+ @add_start_docstrings(
1296
+ """
1297
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1298
+ softmax) e.g. for RocStories/SWAG tasks.
1299
+ """,
1300
+ ALBERT_START_DOCSTRING,
1301
+ )
1302
+ class AlbertForMultipleChoice(AlbertPreTrainedModel):
1303
+ def __init__(self, config: AlbertConfig):
1304
+ super().__init__(config)
1305
+
1306
+ self.albert = AlbertModel(config)
1307
+ self.dropout = nn.Dropout(config.classifier_dropout_prob)
1308
+ self.classifier = nn.Linear(config.hidden_size, 1)
1309
+
1310
+ # Initialize weights and apply final processing
1311
+ self.post_init()
1312
+
1313
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1314
+ @add_code_sample_docstrings(
1315
+ checkpoint=_CHECKPOINT_FOR_DOC,
1316
+ output_type=MultipleChoiceModelOutput,
1317
+ config_class=_CONFIG_FOR_DOC,
1318
+ )
1319
+ def forward(
1320
+ self,
1321
+ input_ids: Optional[torch.LongTensor] = None,
1322
+ attention_mask: Optional[torch.FloatTensor] = None,
1323
+ token_type_ids: Optional[torch.LongTensor] = None,
1324
+ position_ids: Optional[torch.LongTensor] = None,
1325
+ head_mask: Optional[torch.FloatTensor] = None,
1326
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1327
+ labels: Optional[torch.LongTensor] = None,
1328
+ output_attentions: Optional[bool] = None,
1329
+ output_hidden_states: Optional[bool] = None,
1330
+ return_dict: Optional[bool] = None,
1331
+ ) -> Union[AlbertForPreTrainingOutput, Tuple]:
1332
+ r"""
1333
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1334
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1335
+ num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
1336
+ *input_ids* above)
1337
+ """
1338
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1339
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1340
+
1341
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1342
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1343
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1344
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1345
+ inputs_embeds = (
1346
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1347
+ if inputs_embeds is not None
1348
+ else None
1349
+ )
1350
+ outputs = self.albert(
1351
+ input_ids,
1352
+ attention_mask=attention_mask,
1353
+ token_type_ids=token_type_ids,
1354
+ position_ids=position_ids,
1355
+ head_mask=head_mask,
1356
+ inputs_embeds=inputs_embeds,
1357
+ output_attentions=output_attentions,
1358
+ output_hidden_states=output_hidden_states,
1359
+ return_dict=return_dict,
1360
+ )
1361
+
1362
+ pooled_output = outputs[1]
1363
+
1364
+ pooled_output = self.dropout(pooled_output)
1365
+ logits: torch.Tensor = self.classifier(pooled_output)
1366
+ reshaped_logits = logits.view(-1, num_choices)
1367
+
1368
+ loss = None
1369
+ if labels is not None:
1370
+ loss_fct = CrossEntropyLoss()
1371
+ loss = loss_fct(reshaped_logits, labels)
1372
+
1373
+ if not return_dict:
1374
+ output = (reshaped_logits,) + outputs[2:]
1375
+ return ((loss,) + output) if loss is not None else output
1376
+
1377
+ return MultipleChoiceModelOutput(
1378
+ loss=loss,
1379
+ logits=reshaped_logits,
1380
+ hidden_states=outputs.hidden_states,
1381
+ attentions=outputs.attentions,
1382
+ )
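A sketch (not part of the module being added) of the input shaping that `AlbertForMultipleChoice.forward` above expects: inputs of shape `(batch_size, num_choices, sequence_length)` are flattened for the encoder and the logits are reshaped back to `(batch_size, num_choices)`. The prompt and choices are made up:

```python
# Sketch only: building the (batch_size, num_choices, seq_len) inputs.
import torch
from transformers import AlbertForMultipleChoice, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "ALBERT factorizes the embedding matrix"
choices = ["to reduce the number of parameters.", "to enlarge the vocabulary."]

encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # add the num_choices dimension

outputs = model(**inputs, labels=torch.tensor([0]))
print(outputs.logits.shape)  # torch.Size([1, 2])
print(outputs.loss)
```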
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_flax_albert.py ADDED
@@ -0,0 +1,1121 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import (
29
+ FlaxBaseModelOutput,
30
+ FlaxBaseModelOutputWithPooling,
31
+ FlaxMaskedLMOutput,
32
+ FlaxMultipleChoiceModelOutput,
33
+ FlaxQuestionAnsweringModelOutput,
34
+ FlaxSequenceClassifierOutput,
35
+ FlaxTokenClassifierOutput,
36
+ )
37
+ from ...modeling_flax_utils import (
38
+ ACT2FN,
39
+ FlaxPreTrainedModel,
40
+ append_call_sample_docstring,
41
+ append_replace_return_docstrings,
42
+ overwrite_call_docstring,
43
+ )
44
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
45
+ from .configuration_albert import AlbertConfig
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
51
+ _CONFIG_FOR_DOC = "AlbertConfig"
52
+
53
+
54
+ @flax.struct.dataclass
55
+ class FlaxAlbertForPreTrainingOutput(ModelOutput):
56
+ """
57
+ Output type of [`FlaxAlbertForPreTraining`].
58
+
59
+ Args:
60
+ prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
61
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
62
+ sop_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
63
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
64
+ before SoftMax).
65
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
66
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
67
+ `(batch_size, sequence_length, hidden_size)`.
68
+
69
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
70
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
71
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
72
+ sequence_length)`.
73
+
74
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
75
+ heads.
76
+ """
77
+
78
+ prediction_logits: jnp.ndarray = None
79
+ sop_logits: jnp.ndarray = None
80
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
81
+ attentions: Optional[Tuple[jnp.ndarray]] = None
82
+
83
+
84
+ ALBERT_START_DOCSTRING = r"""
85
+
86
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
87
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
88
+
89
+ This model is also a
90
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
91
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
92
+ behavior.
93
+
94
+ Finally, this model supports inherent JAX features such as:
95
+
96
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
97
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
98
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
99
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
100
+
101
+ Parameters:
102
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
103
+ Initializing with a config file does not load the weights associated with the model, only the
104
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
105
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
106
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
107
+ `jax.numpy.bfloat16` (on TPUs).
108
+
109
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
110
+ specified, all the computation will be performed with the given `dtype`.
111
+
112
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
113
+ parameters.**
114
+
115
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
116
+ [`~FlaxPreTrainedModel.to_bf16`].
117
+ """
118
+
119
+ ALBERT_INPUTS_DOCSTRING = r"""
120
+ Args:
121
+ input_ids (`numpy.ndarray` of shape `({0})`):
122
+ Indices of input sequence tokens in the vocabulary.
123
+
124
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
125
+ [`PreTrainedTokenizer.__call__`] for details.
126
+
127
+ [What are input IDs?](../glossary#input-ids)
128
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
129
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
130
+
131
+ - 1 for tokens that are **not masked**,
132
+ - 0 for tokens that are **masked**.
133
+
134
+ [What are attention masks?](../glossary#attention-mask)
135
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
136
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
137
+ 1]`:
138
+
139
+ - 0 corresponds to a *sentence A* token,
140
+ - 1 corresponds to a *sentence B* token.
141
+
142
+ [What are token type IDs?](../glossary#token-type-ids)
143
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
144
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
145
+ config.max_position_embeddings - 1]`.
146
+ return_dict (`bool`, *optional*):
147
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
148
+
149
+ """
150
+
151
+
152
+ class FlaxAlbertEmbeddings(nn.Module):
153
+ """Construct the embeddings from word, position and token_type embeddings."""
154
+
155
+ config: AlbertConfig
156
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
157
+
158
+ def setup(self):
159
+ self.word_embeddings = nn.Embed(
160
+ self.config.vocab_size,
161
+ self.config.embedding_size,
162
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
163
+ )
164
+ self.position_embeddings = nn.Embed(
165
+ self.config.max_position_embeddings,
166
+ self.config.embedding_size,
167
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
168
+ )
169
+ self.token_type_embeddings = nn.Embed(
170
+ self.config.type_vocab_size,
171
+ self.config.embedding_size,
172
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
173
+ )
174
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
175
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
176
+
177
+ def __call__(self, input_ids, token_type_ids, position_ids, deterministic: bool = True):
178
+ # Embed
179
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
180
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
181
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
182
+
183
+ # Sum all embeddings
184
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
185
+
186
+ # Layer Norm
187
+ hidden_states = self.LayerNorm(hidden_states)
188
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
189
+ return hidden_states
190
+
191
+
192
+ class FlaxAlbertSelfAttention(nn.Module):
193
+ config: AlbertConfig
194
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
195
+
196
+ def setup(self):
197
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
198
+ raise ValueError(
199
+ f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
200
+ f" : {self.config.num_attention_heads}"
201
+ )
202
+
203
+ self.query = nn.Dense(
204
+ self.config.hidden_size,
205
+ dtype=self.dtype,
206
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
207
+ )
208
+ self.key = nn.Dense(
209
+ self.config.hidden_size,
210
+ dtype=self.dtype,
211
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
212
+ )
213
+ self.value = nn.Dense(
214
+ self.config.hidden_size,
215
+ dtype=self.dtype,
216
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
217
+ )
218
+ self.dense = nn.Dense(
219
+ self.config.hidden_size,
220
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
221
+ dtype=self.dtype,
222
+ )
223
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
224
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
225
+
226
+ def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions: bool = False):
227
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
228
+
229
+ query_states = self.query(hidden_states).reshape(
230
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
231
+ )
232
+ value_states = self.value(hidden_states).reshape(
233
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
234
+ )
235
+ key_states = self.key(hidden_states).reshape(
236
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
237
+ )
238
+
239
+ # Convert the boolean attention mask to an attention bias.
240
+ if attention_mask is not None:
241
+ # attention mask in the form of attention bias
242
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
243
+ attention_bias = lax.select(
244
+ attention_mask > 0,
245
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
246
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
247
+ )
248
+ else:
249
+ attention_bias = None
250
+
251
+ dropout_rng = None
252
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
253
+ dropout_rng = self.make_rng("dropout")
254
+
255
+ attn_weights = dot_product_attention_weights(
256
+ query_states,
257
+ key_states,
258
+ bias=attention_bias,
259
+ dropout_rng=dropout_rng,
260
+ dropout_rate=self.config.attention_probs_dropout_prob,
261
+ broadcast_dropout=True,
262
+ deterministic=deterministic,
263
+ dtype=self.dtype,
264
+ precision=None,
265
+ )
266
+
267
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
268
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
269
+
270
+ projected_attn_output = self.dense(attn_output)
271
+ projected_attn_output = self.dropout(projected_attn_output, deterministic=deterministic)
272
+ layernormed_attn_output = self.LayerNorm(projected_attn_output + hidden_states)
273
+ outputs = (layernormed_attn_output, attn_weights) if output_attentions else (layernormed_attn_output,)
274
+ return outputs
275
+
276
+
277
+ class FlaxAlbertLayer(nn.Module):
278
+ config: AlbertConfig
279
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
280
+
281
+ def setup(self):
282
+ self.attention = FlaxAlbertSelfAttention(self.config, dtype=self.dtype)
283
+ self.ffn = nn.Dense(
284
+ self.config.intermediate_size,
285
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
286
+ dtype=self.dtype,
287
+ )
288
+ self.activation = ACT2FN[self.config.hidden_act]
289
+ self.ffn_output = nn.Dense(
290
+ self.config.hidden_size,
291
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
292
+ dtype=self.dtype,
293
+ )
294
+ self.full_layer_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
295
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
296
+
297
+ def __call__(
298
+ self,
299
+ hidden_states,
300
+ attention_mask,
301
+ deterministic: bool = True,
302
+ output_attentions: bool = False,
303
+ ):
304
+ attention_outputs = self.attention(
305
+ hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
306
+ )
307
+ attention_output = attention_outputs[0]
308
+ ffn_output = self.ffn(attention_output)
309
+ ffn_output = self.activation(ffn_output)
310
+ ffn_output = self.ffn_output(ffn_output)
311
+ ffn_output = self.dropout(ffn_output, deterministic=deterministic)
312
+ hidden_states = self.full_layer_layer_norm(ffn_output + attention_output)
313
+
314
+ outputs = (hidden_states,)
315
+
316
+ if output_attentions:
317
+ outputs += (attention_outputs[1],)
318
+ return outputs
319
+
320
+
321
+ class FlaxAlbertLayerCollection(nn.Module):
322
+ config: AlbertConfig
323
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
324
+
325
+ def setup(self):
326
+ self.layers = [
327
+ FlaxAlbertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.inner_group_num)
328
+ ]
329
+
330
+ def __call__(
331
+ self,
332
+ hidden_states,
333
+ attention_mask,
334
+ deterministic: bool = True,
335
+ output_attentions: bool = False,
336
+ output_hidden_states: bool = False,
337
+ ):
338
+ layer_hidden_states = ()
339
+ layer_attentions = ()
340
+
341
+ for layer_index, albert_layer in enumerate(self.layers):
342
+ layer_output = albert_layer(
343
+ hidden_states,
344
+ attention_mask,
345
+ deterministic=deterministic,
346
+ output_attentions=output_attentions,
347
+ )
348
+ hidden_states = layer_output[0]
349
+
350
+ if output_attentions:
351
+ layer_attentions = layer_attentions + (layer_output[1],)
352
+
353
+ if output_hidden_states:
354
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
355
+
356
+ outputs = (hidden_states,)
357
+ if output_hidden_states:
358
+ outputs = outputs + (layer_hidden_states,)
359
+ if output_attentions:
360
+ outputs = outputs + (layer_attentions,)
361
+ return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
362
+
363
+
364
+ class FlaxAlbertLayerCollections(nn.Module):
365
+ config: AlbertConfig
366
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
367
+ layer_index: Optional[str] = None
368
+
369
+ def setup(self):
370
+ self.albert_layers = FlaxAlbertLayerCollection(self.config, dtype=self.dtype)
371
+
372
+ def __call__(
373
+ self,
374
+ hidden_states,
375
+ attention_mask,
376
+ deterministic: bool = True,
377
+ output_attentions: bool = False,
378
+ output_hidden_states: bool = False,
379
+ ):
380
+ outputs = self.albert_layers(
381
+ hidden_states,
382
+ attention_mask,
383
+ deterministic=deterministic,
384
+ output_attentions=output_attentions,
385
+ output_hidden_states=output_hidden_states,
386
+ )
387
+ return outputs
388
+
389
+
390
+ class FlaxAlbertLayerGroups(nn.Module):
391
+ config: AlbertConfig
392
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
393
+
394
+ def setup(self):
395
+ self.layers = [
396
+ FlaxAlbertLayerCollections(self.config, name=str(i), layer_index=str(i), dtype=self.dtype)
397
+ for i in range(self.config.num_hidden_groups)
398
+ ]
399
+
400
+ def __call__(
401
+ self,
402
+ hidden_states,
403
+ attention_mask,
404
+ deterministic: bool = True,
405
+ output_attentions: bool = False,
406
+ output_hidden_states: bool = False,
407
+ return_dict: bool = True,
408
+ ):
409
+ all_attentions = () if output_attentions else None
410
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
411
+
412
+ for i in range(self.config.num_hidden_layers):
413
+ # Index of the hidden group
414
+ group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
415
+ layer_group_output = self.layers[group_idx](
416
+ hidden_states,
417
+ attention_mask,
418
+ deterministic=deterministic,
419
+ output_attentions=output_attentions,
420
+ output_hidden_states=output_hidden_states,
421
+ )
422
+ hidden_states = layer_group_output[0]
423
+
424
+ if output_attentions:
425
+ all_attentions = all_attentions + layer_group_output[-1]
426
+
427
+ if output_hidden_states:
428
+ all_hidden_states = all_hidden_states + (hidden_states,)
429
+
430
+ if not return_dict:
431
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
432
+ return FlaxBaseModelOutput(
433
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
434
+ )
435
+
436
+
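A small sketch (not part of the module being added) of the layer-to-group mapping computed in `FlaxAlbertLayerGroups.__call__` above; with 12 forward steps and 3 hidden groups, each group's shared parameters are reused for 4 consecutive steps:

```python
# Sketch only: the group_idx arithmetic from FlaxAlbertLayerGroups.
num_hidden_layers, num_hidden_groups = 12, 3
for i in range(num_hidden_layers):
    group_idx = int(i / (num_hidden_layers / num_hidden_groups))
    print(f"step {i:2d} -> layer group {group_idx}")
# steps 0-3 use group 0, steps 4-7 use group 1, steps 8-11 use group 2
```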
437
+ class FlaxAlbertEncoder(nn.Module):
438
+ config: AlbertConfig
439
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
440
+
441
+ def setup(self):
442
+ self.embedding_hidden_mapping_in = nn.Dense(
443
+ self.config.hidden_size,
444
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
445
+ dtype=self.dtype,
446
+ )
447
+ self.albert_layer_groups = FlaxAlbertLayerGroups(self.config, dtype=self.dtype)
448
+
449
+ def __call__(
450
+ self,
451
+ hidden_states,
452
+ attention_mask,
453
+ deterministic: bool = True,
454
+ output_attentions: bool = False,
455
+ output_hidden_states: bool = False,
456
+ return_dict: bool = True,
457
+ ):
458
+ hidden_states = self.embedding_hidden_mapping_in(hidden_states)
459
+ return self.albert_layer_groups(
460
+ hidden_states,
461
+ attention_mask,
462
+ deterministic=deterministic,
463
+ output_attentions=output_attentions,
464
+ output_hidden_states=output_hidden_states,
465
+ )
466
+
467
+
468
+ class FlaxAlbertOnlyMLMHead(nn.Module):
469
+ config: AlbertConfig
470
+ dtype: jnp.dtype = jnp.float32
471
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
472
+
473
+ def setup(self):
474
+ self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
475
+ self.activation = ACT2FN[self.config.hidden_act]
476
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
477
+ self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
478
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
479
+
480
+ def __call__(self, hidden_states, shared_embedding=None):
481
+ hidden_states = self.dense(hidden_states)
482
+ hidden_states = self.activation(hidden_states)
483
+ hidden_states = self.LayerNorm(hidden_states)
484
+
485
+ if shared_embedding is not None:
486
+ hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
487
+ else:
488
+ hidden_states = self.decoder(hidden_states)
489
+
490
+ hidden_states += self.bias
491
+ return hidden_states
492
+
493
+
494
+ class FlaxAlbertSOPHead(nn.Module):
495
+ config: AlbertConfig
496
+ dtype: jnp.dtype = jnp.float32
497
+
498
+ def setup(self):
499
+ self.dropout = nn.Dropout(self.config.classifier_dropout_prob)
500
+ self.classifier = nn.Dense(2, dtype=self.dtype)
501
+
502
+ def __call__(self, pooled_output, deterministic=True):
503
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
504
+ logits = self.classifier(pooled_output)
505
+ return logits
506
+
507
+
508
+ class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel):
509
+ """
510
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
511
+ models.
512
+ """
513
+
514
+ config_class = AlbertConfig
515
+ base_model_prefix = "albert"
516
+ module_class: nn.Module = None
517
+
518
+ def __init__(
519
+ self,
520
+ config: AlbertConfig,
521
+ input_shape: Tuple = (1, 1),
522
+ seed: int = 0,
523
+ dtype: jnp.dtype = jnp.float32,
524
+ _do_init: bool = True,
525
+ **kwargs,
526
+ ):
527
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
528
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
529
+
530
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
531
+ # init input tensors
532
+ input_ids = jnp.zeros(input_shape, dtype="i4")
533
+ token_type_ids = jnp.zeros_like(input_ids)
534
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
535
+ attention_mask = jnp.ones_like(input_ids)
536
+
537
+ params_rng, dropout_rng = jax.random.split(rng)
538
+ rngs = {"params": params_rng, "dropout": dropout_rng}
539
+
540
+ random_params = self.module.init(
541
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, return_dict=False
542
+ )["params"]
543
+
544
+ if params is not None:
545
+ random_params = flatten_dict(unfreeze(random_params))
546
+ params = flatten_dict(unfreeze(params))
547
+ for missing_key in self._missing_keys:
548
+ params[missing_key] = random_params[missing_key]
549
+ self._missing_keys = set()
550
+ return freeze(unflatten_dict(params))
551
+ else:
552
+ return random_params
553
+
554
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
555
+ def __call__(
556
+ self,
557
+ input_ids,
558
+ attention_mask=None,
559
+ token_type_ids=None,
560
+ position_ids=None,
561
+ params: dict = None,
562
+ dropout_rng: jax.random.PRNGKey = None,
563
+ train: bool = False,
564
+ output_attentions: Optional[bool] = None,
565
+ output_hidden_states: Optional[bool] = None,
566
+ return_dict: Optional[bool] = None,
567
+ ):
568
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
569
+ output_hidden_states = (
570
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
571
+ )
572
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
573
+
574
+ # init input tensors if not passed
575
+ if token_type_ids is None:
576
+ token_type_ids = jnp.zeros_like(input_ids)
577
+
578
+ if position_ids is None:
579
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
580
+
581
+ if attention_mask is None:
582
+ attention_mask = jnp.ones_like(input_ids)
583
+
584
+ # Handle any PRNG if needed
585
+ rngs = {}
586
+ if dropout_rng is not None:
587
+ rngs["dropout"] = dropout_rng
588
+
589
+ return self.module.apply(
590
+ {"params": params or self.params},
591
+ jnp.array(input_ids, dtype="i4"),
592
+ jnp.array(attention_mask, dtype="i4"),
593
+ jnp.array(token_type_ids, dtype="i4"),
594
+ jnp.array(position_ids, dtype="i4"),
595
+ not train,
596
+ output_attentions,
597
+ output_hidden_states,
598
+ return_dict,
599
+ rngs=rngs,
600
+ )
601
+
602
+
603
+ class FlaxAlbertModule(nn.Module):
604
+ config: AlbertConfig
605
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
606
+ add_pooling_layer: bool = True
607
+
608
+ def setup(self):
609
+ self.embeddings = FlaxAlbertEmbeddings(self.config, dtype=self.dtype)
610
+ self.encoder = FlaxAlbertEncoder(self.config, dtype=self.dtype)
611
+ if self.add_pooling_layer:
612
+ self.pooler = nn.Dense(
613
+ self.config.hidden_size,
614
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
615
+ dtype=self.dtype,
616
+ name="pooler",
617
+ )
618
+ self.pooler_activation = nn.tanh
619
+ else:
620
+ self.pooler = None
621
+ self.pooler_activation = None
622
+
623
+ def __call__(
624
+ self,
625
+ input_ids,
626
+ attention_mask,
627
+ token_type_ids: Optional[np.ndarray] = None,
628
+ position_ids: Optional[np.ndarray] = None,
629
+ deterministic: bool = True,
630
+ output_attentions: bool = False,
631
+ output_hidden_states: bool = False,
632
+ return_dict: bool = True,
633
+ ):
634
+ # make sure `token_type_ids` is correctly initialized when not passed
635
+ if token_type_ids is None:
636
+ token_type_ids = jnp.zeros_like(input_ids)
637
+
638
+ # make sure `position_ids` is correctly initialized when not passed
639
+ if position_ids is None:
640
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
641
+
642
+ hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, deterministic=deterministic)
643
+
644
+ outputs = self.encoder(
645
+ hidden_states,
646
+ attention_mask,
647
+ deterministic=deterministic,
648
+ output_attentions=output_attentions,
649
+ output_hidden_states=output_hidden_states,
650
+ return_dict=return_dict,
651
+ )
652
+ hidden_states = outputs[0]
653
+ if self.add_pooling_layer:
654
+ pooled = self.pooler(hidden_states[:, 0])
655
+ pooled = self.pooler_activation(pooled)
656
+ else:
657
+ pooled = None
658
+
659
+ if not return_dict:
660
+ # if pooled is None, don't return it
661
+ if pooled is None:
662
+ return (hidden_states,) + outputs[1:]
663
+ return (hidden_states, pooled) + outputs[1:]
664
+
665
+ return FlaxBaseModelOutputWithPooling(
666
+ last_hidden_state=hidden_states,
667
+ pooler_output=pooled,
668
+ hidden_states=outputs.hidden_states,
669
+ attentions=outputs.attentions,
670
+ )
671
+
672
+
673
+ @add_start_docstrings(
674
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
675
+ ALBERT_START_DOCSTRING,
676
+ )
677
+ class FlaxAlbertModel(FlaxAlbertPreTrainedModel):
678
+ module_class = FlaxAlbertModule
679
+
680
+
681
+ append_call_sample_docstring(FlaxAlbertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)
682
+
683
+
684
+ class FlaxAlbertForPreTrainingModule(nn.Module):
685
+ config: AlbertConfig
686
+ dtype: jnp.dtype = jnp.float32
687
+
688
+ def setup(self):
689
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
690
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
691
+ self.sop_classifier = FlaxAlbertSOPHead(config=self.config, dtype=self.dtype)
692
+
693
+ def __call__(
694
+ self,
695
+ input_ids,
696
+ attention_mask,
697
+ token_type_ids,
698
+ position_ids,
699
+ deterministic: bool = True,
700
+ output_attentions: bool = False,
701
+ output_hidden_states: bool = False,
702
+ return_dict: bool = True,
703
+ ):
704
+ # Model
705
+ outputs = self.albert(
706
+ input_ids,
707
+ attention_mask,
708
+ token_type_ids,
709
+ position_ids,
710
+ deterministic=deterministic,
711
+ output_attentions=output_attentions,
712
+ output_hidden_states=output_hidden_states,
713
+ return_dict=return_dict,
714
+ )
715
+
716
+ if self.config.tie_word_embeddings:
717
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
718
+ else:
719
+ shared_embedding = None
720
+
721
+ hidden_states = outputs[0]
722
+ pooled_output = outputs[1]
723
+
724
+ prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
725
+ sop_scores = self.sop_classifier(pooled_output, deterministic=deterministic)
726
+
727
+ if not return_dict:
728
+ return (prediction_scores, sop_scores) + outputs[2:]
729
+
730
+ return FlaxAlbertForPreTrainingOutput(
731
+ prediction_logits=prediction_scores,
732
+ sop_logits=sop_scores,
733
+ hidden_states=outputs.hidden_states,
734
+ attentions=outputs.attentions,
735
+ )
736
+
737
+
738
+ @add_start_docstrings(
739
+ """
740
+ Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
741
+ `sentence order prediction (classification)` head.
742
+ """,
743
+ ALBERT_START_DOCSTRING,
744
+ )
745
+ class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel):
746
+ module_class = FlaxAlbertForPreTrainingModule
747
+
748
+
749
+ FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING = """
750
+ Returns:
751
+
752
+ Example:
753
+
754
+ ```python
755
+ >>> from transformers import AutoTokenizer, FlaxAlbertForPreTraining
756
+
757
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
758
+ >>> model = FlaxAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
759
+
760
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
761
+ >>> outputs = model(**inputs)
762
+
763
+ >>> prediction_logits = outputs.prediction_logits
764
+ >>> seq_relationship_logits = outputs.sop_logits
765
+ ```
766
+ """
767
+
768
+ overwrite_call_docstring(
769
+ FlaxAlbertForPreTraining,
770
+ ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING,
771
+ )
772
+ append_replace_return_docstrings(
773
+ FlaxAlbertForPreTraining, output_type=FlaxAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
774
+ )
775
+
776
+
777
+ class FlaxAlbertForMaskedLMModule(nn.Module):
778
+ config: AlbertConfig
779
+ dtype: jnp.dtype = jnp.float32
780
+
781
+ def setup(self):
782
+ self.albert = FlaxAlbertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)
783
+ self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype)
784
+
785
+ def __call__(
786
+ self,
787
+ input_ids,
788
+ attention_mask,
789
+ token_type_ids,
790
+ position_ids,
791
+ deterministic: bool = True,
792
+ output_attentions: bool = False,
793
+ output_hidden_states: bool = False,
794
+ return_dict: bool = True,
795
+ ):
796
+ # Model
797
+ outputs = self.albert(
798
+ input_ids,
799
+ attention_mask,
800
+ token_type_ids,
801
+ position_ids,
802
+ deterministic=deterministic,
803
+ output_attentions=output_attentions,
804
+ output_hidden_states=output_hidden_states,
805
+ return_dict=return_dict,
806
+ )
807
+
808
+ hidden_states = outputs[0]
809
+ if self.config.tie_word_embeddings:
810
+ shared_embedding = self.albert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
811
+ else:
812
+ shared_embedding = None
813
+
814
+ # Compute the prediction scores
815
+ logits = self.predictions(hidden_states, shared_embedding=shared_embedding)
816
+
817
+ if not return_dict:
818
+ return (logits,) + outputs[1:]
819
+
820
+ return FlaxMaskedLMOutput(
821
+ logits=logits,
822
+ hidden_states=outputs.hidden_states,
823
+ attentions=outputs.attentions,
824
+ )
825
+
826
+
827
+ @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
828
+ class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel):
829
+ module_class = FlaxAlbertForMaskedLMModule
830
+
831
+
832
+ append_call_sample_docstring(
833
+ FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, revision="refs/pr/11"
834
+ )
835
+
836
+
837
+ class FlaxAlbertForSequenceClassificationModule(nn.Module):
838
+ config: AlbertConfig
839
+ dtype: jnp.dtype = jnp.float32
840
+
841
+ def setup(self):
842
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
843
+ classifier_dropout = (
844
+ self.config.classifier_dropout_prob
845
+ if self.config.classifier_dropout_prob is not None
846
+ else self.config.hidden_dropout_prob
847
+ )
848
+ self.dropout = nn.Dropout(rate=classifier_dropout)
849
+ self.classifier = nn.Dense(
850
+ self.config.num_labels,
851
+ dtype=self.dtype,
852
+ )
853
+
854
+ def __call__(
855
+ self,
856
+ input_ids,
857
+ attention_mask,
858
+ token_type_ids,
859
+ position_ids,
860
+ deterministic: bool = True,
861
+ output_attentions: bool = False,
862
+ output_hidden_states: bool = False,
863
+ return_dict: bool = True,
864
+ ):
865
+ # Model
866
+ outputs = self.albert(
867
+ input_ids,
868
+ attention_mask,
869
+ token_type_ids,
870
+ position_ids,
871
+ deterministic=deterministic,
872
+ output_attentions=output_attentions,
873
+ output_hidden_states=output_hidden_states,
874
+ return_dict=return_dict,
875
+ )
876
+
877
+ pooled_output = outputs[1]
878
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
879
+ logits = self.classifier(pooled_output)
880
+
881
+ if not return_dict:
882
+ return (logits,) + outputs[2:]
883
+
884
+ return FlaxSequenceClassifierOutput(
885
+ logits=logits,
886
+ hidden_states=outputs.hidden_states,
887
+ attentions=outputs.attentions,
888
+ )
889
+
890
+
891
+ @add_start_docstrings(
892
+ """
893
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
894
+ output) e.g. for GLUE tasks.
895
+ """,
896
+ ALBERT_START_DOCSTRING,
897
+ )
898
+ class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel):
899
+ module_class = FlaxAlbertForSequenceClassificationModule
900
+
901
+
902
+ append_call_sample_docstring(
903
+ FlaxAlbertForSequenceClassification,
904
+ _CHECKPOINT_FOR_DOC,
905
+ FlaxSequenceClassifierOutput,
906
+ _CONFIG_FOR_DOC,
907
+ )
908
+
909
+
910
+ class FlaxAlbertForMultipleChoiceModule(nn.Module):
911
+ config: AlbertConfig
912
+ dtype: jnp.dtype = jnp.float32
913
+
914
+ def setup(self):
915
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype)
916
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
917
+ self.classifier = nn.Dense(1, dtype=self.dtype)
918
+
919
+ def __call__(
920
+ self,
921
+ input_ids,
922
+ attention_mask,
923
+ token_type_ids,
924
+ position_ids,
925
+ deterministic: bool = True,
926
+ output_attentions: bool = False,
927
+ output_hidden_states: bool = False,
928
+ return_dict: bool = True,
929
+ ):
930
+ num_choices = input_ids.shape[1]
931
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
932
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
933
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
934
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
935
+
936
+ # Model
937
+ outputs = self.albert(
938
+ input_ids,
939
+ attention_mask,
940
+ token_type_ids,
941
+ position_ids,
942
+ deterministic=deterministic,
943
+ output_attentions=output_attentions,
944
+ output_hidden_states=output_hidden_states,
945
+ return_dict=return_dict,
946
+ )
947
+
948
+ pooled_output = outputs[1]
949
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
950
+ logits = self.classifier(pooled_output)
951
+
952
+ reshaped_logits = logits.reshape(-1, num_choices)
953
+
954
+ if not return_dict:
955
+ return (reshaped_logits,) + outputs[2:]
956
+
957
+ return FlaxMultipleChoiceModelOutput(
958
+ logits=reshaped_logits,
959
+ hidden_states=outputs.hidden_states,
960
+ attentions=outputs.attentions,
961
+ )
962
+
963
+
964
+ @add_start_docstrings(
965
+ """
966
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
967
+ softmax) e.g. for RocStories/SWAG tasks.
968
+ """,
969
+ ALBERT_START_DOCSTRING,
970
+ )
971
+ class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel):
972
+ module_class = FlaxAlbertForMultipleChoiceModule
973
+
974
+
975
+ overwrite_call_docstring(
976
+ FlaxAlbertForMultipleChoice, ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
977
+ )
978
+ append_call_sample_docstring(
979
+ FlaxAlbertForMultipleChoice,
980
+ _CHECKPOINT_FOR_DOC,
981
+ FlaxMultipleChoiceModelOutput,
982
+ _CONFIG_FOR_DOC,
983
+ )
984
+
985
+
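The reshape logic above folds the choices dimension into the batch before encoding and unfolds it for the final logits. A hedged usage sketch mirroring that layout, with the checkpoint used in this file's docstrings and made-up example text:

```python
from transformers import AutoTokenizer, FlaxAlbertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = FlaxAlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "A plate of pasta is on the table."
choices = ["It is eaten with a fork.", "It is thrown like a frisbee."]

# Tokenize each (prompt, choice) pair, then add the num_choices dimension:
# shapes go from (num_choices, seq_len) to (batch_size=1, num_choices, seq_len).
inputs = tokenizer([prompt, prompt], choices, return_tensors="np", padding=True)
outputs = model(**{k: v[None, :] for k, v in inputs.items()})
print(outputs.logits.shape)  # (1, 2): one score per choice
```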
986
+ class FlaxAlbertForTokenClassificationModule(nn.Module):
987
+ config: AlbertConfig
988
+ dtype: jnp.dtype = jnp.float32
989
+
990
+ def setup(self):
991
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
992
+ classifier_dropout = (
993
+ self.config.classifier_dropout_prob
994
+ if self.config.classifier_dropout_prob is not None
995
+ else self.config.hidden_dropout_prob
996
+ )
997
+ self.dropout = nn.Dropout(rate=classifier_dropout)
998
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
999
+
1000
+ def __call__(
1001
+ self,
1002
+ input_ids,
1003
+ attention_mask,
1004
+ token_type_ids,
1005
+ position_ids,
1006
+ deterministic: bool = True,
1007
+ output_attentions: bool = False,
1008
+ output_hidden_states: bool = False,
1009
+ return_dict: bool = True,
1010
+ ):
1011
+ # Model
1012
+ outputs = self.albert(
1013
+ input_ids,
1014
+ attention_mask,
1015
+ token_type_ids,
1016
+ position_ids,
1017
+ deterministic=deterministic,
1018
+ output_attentions=output_attentions,
1019
+ output_hidden_states=output_hidden_states,
1020
+ return_dict=return_dict,
1021
+ )
1022
+
1023
+ hidden_states = outputs[0]
1024
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1025
+ logits = self.classifier(hidden_states)
1026
+
1027
+ if not return_dict:
1028
+ return (logits,) + outputs[1:]
1029
+
1030
+ return FlaxTokenClassifierOutput(
1031
+ logits=logits,
1032
+ hidden_states=outputs.hidden_states,
1033
+ attentions=outputs.attentions,
1034
+ )
1035
+
1036
+
1037
+ @add_start_docstrings(
1038
+ """
1039
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1040
+ Named-Entity-Recognition (NER) tasks.
1041
+ """,
1042
+ ALBERT_START_DOCSTRING,
1043
+ )
1044
+ class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel):
1045
+ module_class = FlaxAlbertForTokenClassificationModule
1046
+
1047
+
1048
+ append_call_sample_docstring(
1049
+ FlaxAlbertForTokenClassification,
1050
+ _CHECKPOINT_FOR_DOC,
1051
+ FlaxTokenClassifierOutput,
1052
+ _CONFIG_FOR_DOC,
1053
+ )
1054
+
1055
+
1056
+ class FlaxAlbertForQuestionAnsweringModule(nn.Module):
1057
+ config: AlbertConfig
1058
+ dtype: jnp.dtype = jnp.float32
1059
+
1060
+ def setup(self):
1061
+ self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
1062
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1063
+
1064
+ def __call__(
1065
+ self,
1066
+ input_ids,
1067
+ attention_mask,
1068
+ token_type_ids,
1069
+ position_ids,
1070
+ deterministic: bool = True,
1071
+ output_attentions: bool = False,
1072
+ output_hidden_states: bool = False,
1073
+ return_dict: bool = True,
1074
+ ):
1075
+ # Model
1076
+ outputs = self.albert(
1077
+ input_ids,
1078
+ attention_mask,
1079
+ token_type_ids,
1080
+ position_ids,
1081
+ deterministic=deterministic,
1082
+ output_attentions=output_attentions,
1083
+ output_hidden_states=output_hidden_states,
1084
+ return_dict=return_dict,
1085
+ )
1086
+
1087
+ hidden_states = outputs[0]
1088
+
1089
+ logits = self.qa_outputs(hidden_states)
1090
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
1091
+ start_logits = start_logits.squeeze(-1)
1092
+ end_logits = end_logits.squeeze(-1)
1093
+
1094
+ if not return_dict:
1095
+ return (start_logits, end_logits) + outputs[1:]
1096
+
1097
+ return FlaxQuestionAnsweringModelOutput(
1098
+ start_logits=start_logits,
1099
+ end_logits=end_logits,
1100
+ hidden_states=outputs.hidden_states,
1101
+ attentions=outputs.attentions,
1102
+ )
1103
+
1104
+
1105
+ @add_start_docstrings(
1106
+ """
1107
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1108
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1109
+ """,
1110
+ ALBERT_START_DOCSTRING,
1111
+ )
1112
+ class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel):
1113
+ module_class = FlaxAlbertForQuestionAnsweringModule
1114
+
1115
+
1116
+ append_call_sample_docstring(
1117
+ FlaxAlbertForQuestionAnswering,
1118
+ _CHECKPOINT_FOR_DOC,
1119
+ FlaxQuestionAnsweringModelOutput,
1120
+ _CONFIG_FOR_DOC,
1121
+ )
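Rounding off the Flax module, a hedged end-to-end sketch of the question-answering head defined above. The checkpoint is the same base model used in this file's docstrings and is not fine-tuned for QA, so the decoded span is purely illustrative:

```python
from transformers import AutoTokenizer, FlaxAlbertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = FlaxAlbertForQuestionAnswering.from_pretrained("albert/albert-base-v2")

question = "Who proposed ALBERT?"
context = "ALBERT was proposed by researchers at Google Research."
inputs = tokenizer(question, context, return_tensors="np")
outputs = model(**inputs)

# Pick the most likely start/end token positions and decode the span between them.
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```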
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/modeling_tf_albert.py ADDED
@@ -0,0 +1,1564 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 ALBERT model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ from dataclasses import dataclass
23
+ from typing import Dict, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFQuestionAnsweringModelOutput,
35
+ TFSequenceClassifierOutput,
36
+ TFTokenClassifierOutput,
37
+ )
38
+ from ...modeling_tf_utils import (
39
+ TFMaskedLanguageModelingLoss,
40
+ TFModelInputType,
41
+ TFMultipleChoiceLoss,
42
+ TFPreTrainedModel,
43
+ TFQuestionAnsweringLoss,
44
+ TFSequenceClassificationLoss,
45
+ TFTokenClassificationLoss,
46
+ get_initializer,
47
+ keras,
48
+ keras_serializable,
49
+ unpack_inputs,
50
+ )
51
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
52
+ from ...utils import (
53
+ ModelOutput,
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ replace_return_docstrings,
59
+ )
60
+ from .configuration_albert import AlbertConfig
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CHECKPOINT_FOR_DOC = "albert/albert-base-v2"
66
+ _CONFIG_FOR_DOC = "AlbertConfig"
67
+
68
+
69
+ from ..deprecated._archive_maps import TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
70
+
71
+
72
+ class TFAlbertPreTrainingLoss:
73
+ """
74
+ Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP +
75
+ MLM. Note: any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
76
+ """
77
+
78
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
79
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
80
+ if self.config.tf_legacy_loss:
81
+ # make sure only labels that are not equal to -100
82
+ # are taken into account as loss
83
+ masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
84
+ masked_lm_reduced_logits = tf.boolean_mask(
85
+ tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
86
+ mask=masked_lm_active_loss,
87
+ )
88
+ masked_lm_labels = tf.boolean_mask(
89
+ tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
90
+ )
91
+ sentence_order_active_loss = tf.not_equal(
92
+ tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100
93
+ )
94
+ sentence_order_reduced_logits = tf.boolean_mask(
95
+ tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss
96
+ )
97
+ sentence_order_label = tf.boolean_mask(
98
+ tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss
99
+ )
100
+ masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
101
+ sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits)
102
+ masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0]))
103
+ masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
104
+
105
+ return masked_lm_loss + sentence_order_loss
106
+
107
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
108
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
109
+ # make sure only labels that are not equal to -100
110
+ # are taken into account for the loss computation
111
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
112
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
113
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
114
+
115
+ sop_logits = tf.reshape(logits[1], (-1, 2))
116
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
117
+ unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels["sentence_order_label"]), y_pred=sop_logits)
118
+ sop_loss_mask = tf.cast(labels["sentence_order_label"] != -100, dtype=unmasked_sop_loss.dtype)
119
+
120
+ masked_sop_loss = unmasked_sop_loss * sop_loss_mask
121
+ reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask)
122
+
123
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,))
124
+
125
+
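The docstring above states the `-100` convention; the non-legacy branch of `hf_compute_loss` implements it by masking per-token losses rather than boolean-gathering them. A self-contained sketch of that masking-and-reduction idea, written directly against `tf.keras` with made-up values:

```python
import tensorflow as tf

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)

labels = tf.constant([[5, -100, 7]])          # -100 marks an ignored position
mlm_logits = tf.random.normal((1, 3, 30000))  # (batch, seq_len, vocab_size)

# Clip -100 to 0 so the loss call is valid, then zero out the ignored positions
# and average only over the positions that carry a real label.
per_token_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=mlm_logits)
mask = tf.cast(labels != -100, dtype=per_token_loss.dtype)
reduced_loss = tf.reduce_sum(per_token_loss * mask) / tf.reduce_sum(mask)
```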
126
+ class TFAlbertEmbeddings(keras.layers.Layer):
127
+ """Construct the embeddings from word, position and token_type embeddings."""
128
+
129
+ def __init__(self, config: AlbertConfig, **kwargs):
130
+ super().__init__(**kwargs)
131
+
132
+ self.config = config
133
+ self.embedding_size = config.embedding_size
134
+ self.max_position_embeddings = config.max_position_embeddings
135
+ self.initializer_range = config.initializer_range
136
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
137
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
138
+
139
+ def build(self, input_shape=None):
140
+ with tf.name_scope("word_embeddings"):
141
+ self.weight = self.add_weight(
142
+ name="weight",
143
+ shape=[self.config.vocab_size, self.embedding_size],
144
+ initializer=get_initializer(self.initializer_range),
145
+ )
146
+
147
+ with tf.name_scope("token_type_embeddings"):
148
+ self.token_type_embeddings = self.add_weight(
149
+ name="embeddings",
150
+ shape=[self.config.type_vocab_size, self.embedding_size],
151
+ initializer=get_initializer(self.initializer_range),
152
+ )
153
+
154
+ with tf.name_scope("position_embeddings"):
155
+ self.position_embeddings = self.add_weight(
156
+ name="embeddings",
157
+ shape=[self.max_position_embeddings, self.embedding_size],
158
+ initializer=get_initializer(self.initializer_range),
159
+ )
160
+
161
+ if self.built:
162
+ return
163
+ self.built = True
164
+ if getattr(self, "LayerNorm", None) is not None:
165
+ with tf.name_scope(self.LayerNorm.name):
166
+ self.LayerNorm.build([None, None, self.config.embedding_size])
167
+
168
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
169
+ def call(
170
+ self,
171
+ input_ids: tf.Tensor = None,
172
+ position_ids: tf.Tensor = None,
173
+ token_type_ids: tf.Tensor = None,
174
+ inputs_embeds: tf.Tensor = None,
175
+ past_key_values_length=0,
176
+ training: bool = False,
177
+ ) -> tf.Tensor:
178
+ """
179
+ Applies embedding based on inputs tensor.
180
+
181
+ Returns:
182
+ final_embeddings (`tf.Tensor`): output embedding tensor.
183
+ """
184
+ if input_ids is None and inputs_embeds is None:
185
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
186
+
187
+ if input_ids is not None:
188
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
189
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
190
+
191
+ input_shape = shape_list(inputs_embeds)[:-1]
192
+
193
+ if token_type_ids is None:
194
+ token_type_ids = tf.fill(dims=input_shape, value=0)
195
+
196
+ if position_ids is None:
197
+ position_ids = tf.expand_dims(
198
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
199
+ )
200
+
201
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
202
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
203
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
204
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
205
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
206
+
207
+ return final_embeddings
208
+
209
+
210
+ class TFAlbertAttention(keras.layers.Layer):
211
+ """Contains the complete attention sublayer, including both dropouts and layer norm."""
212
+
213
+ def __init__(self, config: AlbertConfig, **kwargs):
214
+ super().__init__(**kwargs)
215
+
216
+ if config.hidden_size % config.num_attention_heads != 0:
217
+ raise ValueError(
218
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
219
+ f"of attention heads ({config.num_attention_heads})"
220
+ )
221
+
222
+ self.num_attention_heads = config.num_attention_heads
223
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
224
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
225
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
226
+ self.output_attentions = config.output_attentions
227
+
228
+ self.query = keras.layers.Dense(
229
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
230
+ )
231
+ self.key = keras.layers.Dense(
232
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
233
+ )
234
+ self.value = keras.layers.Dense(
235
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
236
+ )
237
+ self.dense = keras.layers.Dense(
238
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
239
+ )
240
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
241
+ # Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
242
+ self.attention_dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
243
+ self.output_dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
244
+ self.config = config
245
+
246
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
247
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
248
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
249
+
250
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
251
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
252
+
253
+ def call(
254
+ self,
255
+ input_tensor: tf.Tensor,
256
+ attention_mask: tf.Tensor,
257
+ head_mask: tf.Tensor,
258
+ output_attentions: bool,
259
+ training: bool = False,
260
+ ) -> Tuple[tf.Tensor]:
261
+ batch_size = shape_list(input_tensor)[0]
262
+ mixed_query_layer = self.query(inputs=input_tensor)
263
+ mixed_key_layer = self.key(inputs=input_tensor)
264
+ mixed_value_layer = self.value(inputs=input_tensor)
265
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
266
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
267
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
268
+
269
+ # Take the dot product between "query" and "key" to get the raw attention scores.
270
+ # (batch size, num_heads, seq_len_q, seq_len_k)
271
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
272
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
273
+ attention_scores = tf.divide(attention_scores, dk)
274
+
275
+ if attention_mask is not None:
276
+ # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function)
277
+ attention_scores = tf.add(attention_scores, attention_mask)
278
+
279
+ # Normalize the attention scores to probabilities.
280
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
281
+
282
+ # This is actually dropping out entire tokens to attend to, which might
283
+ # seem a bit unusual, but is taken from the original Transformer paper.
284
+ attention_probs = self.attention_dropout(inputs=attention_probs, training=training)
285
+
286
+ # Mask heads if we want to
287
+ if head_mask is not None:
288
+ attention_probs = tf.multiply(attention_probs, head_mask)
289
+
290
+ context_layer = tf.matmul(attention_probs, value_layer)
291
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
292
+
293
+ # (batch_size, seq_len_q, all_head_size)
294
+ context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size))
295
+ self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
296
+ hidden_states = self_outputs[0]
297
+ hidden_states = self.dense(inputs=hidden_states)
298
+ hidden_states = self.output_dropout(inputs=hidden_states, training=training)
299
+ attention_output = self.LayerNorm(inputs=hidden_states + input_tensor)
300
+
301
+ # add attentions if we output them
302
+ outputs = (attention_output,) + self_outputs[1:]
303
+
304
+ return outputs
305
+
306
+ def build(self, input_shape=None):
307
+ if self.built:
308
+ return
309
+ self.built = True
310
+ if getattr(self, "query", None) is not None:
311
+ with tf.name_scope(self.query.name):
312
+ self.query.build([None, None, self.config.hidden_size])
313
+ if getattr(self, "key", None) is not None:
314
+ with tf.name_scope(self.key.name):
315
+ self.key.build([None, None, self.config.hidden_size])
316
+ if getattr(self, "value", None) is not None:
317
+ with tf.name_scope(self.value.name):
318
+ self.value.build([None, None, self.config.hidden_size])
319
+ if getattr(self, "dense", None) is not None:
320
+ with tf.name_scope(self.dense.name):
321
+ self.dense.build([None, None, self.config.hidden_size])
322
+ if getattr(self, "LayerNorm", None) is not None:
323
+ with tf.name_scope(self.LayerNorm.name):
324
+ self.LayerNorm.build([None, None, self.config.hidden_size])
325
+
326
+
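`transpose_for_scores` is the standard multi-head reshape. A standalone sketch with hypothetical sizes makes the shape bookkeeping explicit:

```python
import tensorflow as tf

# Hypothetical sizes: batch=2, seq_len=4, hidden=768 split into 12 heads of 64.
batch_size, seq_len, num_heads, head_size = 2, 4, 12, 64
tensor = tf.random.normal((batch_size, seq_len, num_heads * head_size))

# [batch, seq, all_head_size] -> [batch, seq, num_heads, head_size]
tensor = tf.reshape(tensor, (batch_size, -1, num_heads, head_size))
# -> [batch, num_heads, seq, head_size], ready for per-head attention matmuls
tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
assert tensor.shape == (batch_size, num_heads, seq_len, head_size)
```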
327
+ class TFAlbertLayer(keras.layers.Layer):
328
+ def __init__(self, config: AlbertConfig, **kwargs):
329
+ super().__init__(**kwargs)
330
+
331
+ self.attention = TFAlbertAttention(config, name="attention")
332
+ self.ffn = keras.layers.Dense(
333
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
334
+ )
335
+
336
+ if isinstance(config.hidden_act, str):
337
+ self.activation = get_tf_activation(config.hidden_act)
338
+ else:
339
+ self.activation = config.hidden_act
340
+
341
+ self.ffn_output = keras.layers.Dense(
342
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
343
+ )
344
+ self.full_layer_layer_norm = keras.layers.LayerNormalization(
345
+ epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
346
+ )
347
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
348
+ self.config = config
349
+
350
+ def call(
351
+ self,
352
+ hidden_states: tf.Tensor,
353
+ attention_mask: tf.Tensor,
354
+ head_mask: tf.Tensor,
355
+ output_attentions: bool,
356
+ training: bool = False,
357
+ ) -> Tuple[tf.Tensor]:
358
+ attention_outputs = self.attention(
359
+ input_tensor=hidden_states,
360
+ attention_mask=attention_mask,
361
+ head_mask=head_mask,
362
+ output_attentions=output_attentions,
363
+ training=training,
364
+ )
365
+ ffn_output = self.ffn(inputs=attention_outputs[0])
366
+ ffn_output = self.activation(ffn_output)
367
+ ffn_output = self.ffn_output(inputs=ffn_output)
368
+ ffn_output = self.dropout(inputs=ffn_output, training=training)
369
+ hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0])
370
+
371
+ # add attentions if we output them
372
+ outputs = (hidden_states,) + attention_outputs[1:]
373
+
374
+ return outputs
375
+
376
+ def build(self, input_shape=None):
377
+ if self.built:
378
+ return
379
+ self.built = True
380
+ if getattr(self, "attention", None) is not None:
381
+ with tf.name_scope(self.attention.name):
382
+ self.attention.build(None)
383
+ if getattr(self, "ffn", None) is not None:
384
+ with tf.name_scope(self.ffn.name):
385
+ self.ffn.build([None, None, self.config.hidden_size])
386
+ if getattr(self, "ffn_output", None) is not None:
387
+ with tf.name_scope(self.ffn_output.name):
388
+ self.ffn_output.build([None, None, self.config.intermediate_size])
389
+ if getattr(self, "full_layer_layer_norm", None) is not None:
390
+ with tf.name_scope(self.full_layer_layer_norm.name):
391
+ self.full_layer_layer_norm.build([None, None, self.config.hidden_size])
392
+
393
+
394
+ class TFAlbertLayerGroup(keras.layers.Layer):
395
+ def __init__(self, config: AlbertConfig, **kwargs):
396
+ super().__init__(**kwargs)
397
+
398
+ self.albert_layers = [
399
+ TFAlbertLayer(config, name=f"albert_layers_._{i}") for i in range(config.inner_group_num)
400
+ ]
401
+
402
+ def call(
403
+ self,
404
+ hidden_states: tf.Tensor,
405
+ attention_mask: tf.Tensor,
406
+ head_mask: tf.Tensor,
407
+ output_attentions: bool,
408
+ output_hidden_states: bool,
409
+ training: bool = False,
410
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
411
+ layer_hidden_states = () if output_hidden_states else None
412
+ layer_attentions = () if output_attentions else None
413
+
414
+ for layer_index, albert_layer in enumerate(self.albert_layers):
415
+ if output_hidden_states:
416
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
417
+
418
+ layer_output = albert_layer(
419
+ hidden_states=hidden_states,
420
+ attention_mask=attention_mask,
421
+ head_mask=head_mask[layer_index],
422
+ output_attentions=output_attentions,
423
+ training=training,
424
+ )
425
+ hidden_states = layer_output[0]
426
+
427
+ if output_attentions:
428
+ layer_attentions = layer_attentions + (layer_output[1],)
429
+
430
+ # Add last layer
431
+ if output_hidden_states:
432
+ layer_hidden_states = layer_hidden_states + (hidden_states,)
433
+
434
+ return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)
435
+
436
+ def build(self, input_shape=None):
437
+ if self.built:
438
+ return
439
+ self.built = True
440
+ if getattr(self, "albert_layers", None) is not None:
441
+ for layer in self.albert_layers:
442
+ with tf.name_scope(layer.name):
443
+ layer.build(None)
444
+
445
+
446
+ class TFAlbertTransformer(keras.layers.Layer):
447
+ def __init__(self, config: AlbertConfig, **kwargs):
448
+ super().__init__(**kwargs)
449
+
450
+ self.num_hidden_layers = config.num_hidden_layers
451
+ self.num_hidden_groups = config.num_hidden_groups
452
+ # Number of layers in a hidden group
453
+ self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups)
454
+ self.embedding_hidden_mapping_in = keras.layers.Dense(
455
+ units=config.hidden_size,
456
+ kernel_initializer=get_initializer(config.initializer_range),
457
+ name="embedding_hidden_mapping_in",
458
+ )
459
+ self.albert_layer_groups = [
460
+ TFAlbertLayerGroup(config, name=f"albert_layer_groups_._{i}") for i in range(config.num_hidden_groups)
461
+ ]
462
+ self.config = config
463
+
464
+ def call(
465
+ self,
466
+ hidden_states: tf.Tensor,
467
+ attention_mask: tf.Tensor,
468
+ head_mask: tf.Tensor,
469
+ output_attentions: bool,
470
+ output_hidden_states: bool,
471
+ return_dict: bool,
472
+ training: bool = False,
473
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
474
+ hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
475
+ all_attentions = () if output_attentions else None
476
+ all_hidden_states = (hidden_states,) if output_hidden_states else None
477
+
478
+ for i in range(self.num_hidden_layers):
479
+ # Index of the hidden group
480
+ group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups))
481
+ layer_group_output = self.albert_layer_groups[group_idx](
482
+ hidden_states=hidden_states,
483
+ attention_mask=attention_mask,
484
+ head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group],
485
+ output_attentions=output_attentions,
486
+ output_hidden_states=output_hidden_states,
487
+ training=training,
488
+ )
489
+ hidden_states = layer_group_output[0]
490
+
491
+ if output_attentions:
492
+ all_attentions = all_attentions + layer_group_output[-1]
493
+
494
+ if output_hidden_states:
495
+ all_hidden_states = all_hidden_states + (hidden_states,)
496
+
497
+ if not return_dict:
498
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
499
+
500
+ return TFBaseModelOutput(
501
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
502
+ )
503
+
504
+ def build(self, input_shape=None):
505
+ if self.built:
506
+ return
507
+ self.built = True
508
+ if getattr(self, "embedding_hidden_mapping_in", None) is not None:
509
+ with tf.name_scope(self.embedding_hidden_mapping_in.name):
510
+ self.embedding_hidden_mapping_in.build([None, None, self.config.embedding_size])
511
+ if getattr(self, "albert_layer_groups", None) is not None:
512
+ for layer in self.albert_layer_groups:
513
+ with tf.name_scope(layer.name):
514
+ layer.build(None)
515
+
516
+
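The `group_idx` arithmetic in `call` is where ALBERT's cross-layer parameter sharing happens: consecutive layer indices are routed to the same shared layer group, and the matching slice of the head mask travels with them. A small sketch of that mapping with hypothetical sizes (the released ALBERT checkpoints actually use a single group):

```python
# Hypothetical configuration: 12 layers shared across 3 groups of 4 layers each.
num_hidden_layers, num_hidden_groups = 12, 3
layers_per_group = int(num_hidden_layers / num_hidden_groups)

for i in range(num_hidden_layers):
    group_idx = int(i / (num_hidden_layers / num_hidden_groups))
    start = group_idx * layers_per_group
    stop = (group_idx + 1) * layers_per_group
    print(f"layer {i:2d} -> group {group_idx}, head_mask[{start}:{stop}]")
```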
517
+ class TFAlbertPreTrainedModel(TFPreTrainedModel):
518
+ """
519
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
520
+ models.
521
+ """
522
+
523
+ config_class = AlbertConfig
524
+ base_model_prefix = "albert"
525
+
526
+
527
+ class TFAlbertMLMHead(keras.layers.Layer):
528
+ def __init__(self, config: AlbertConfig, input_embeddings: keras.layers.Layer, **kwargs):
529
+ super().__init__(**kwargs)
530
+
531
+ self.config = config
532
+ self.embedding_size = config.embedding_size
533
+ self.dense = keras.layers.Dense(
534
+ config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
535
+ )
536
+ if isinstance(config.hidden_act, str):
537
+ self.activation = get_tf_activation(config.hidden_act)
538
+ else:
539
+ self.activation = config.hidden_act
540
+
541
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
542
+
543
+ # The output weights are the same as the input embeddings, but there is
544
+ # an output-only bias for each token.
545
+ self.decoder = input_embeddings
546
+
547
+ def build(self, input_shape=None):
548
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
549
+ self.decoder_bias = self.add_weight(
550
+ shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
551
+ )
552
+
553
+ if self.built:
554
+ return
555
+ self.built = True
556
+ if getattr(self, "dense", None) is not None:
557
+ with tf.name_scope(self.dense.name):
558
+ self.dense.build([None, None, self.config.hidden_size])
559
+ if getattr(self, "LayerNorm", None) is not None:
560
+ with tf.name_scope(self.LayerNorm.name):
561
+ self.LayerNorm.build([None, None, self.config.embedding_size])
562
+
563
+ def get_output_embeddings(self) -> keras.layers.Layer:
564
+ return self.decoder
565
+
566
+ def set_output_embeddings(self, value: tf.Variable):
567
+ self.decoder.weight = value
568
+ self.decoder.vocab_size = shape_list(value)[0]
569
+
570
+ def get_bias(self) -> Dict[str, tf.Variable]:
571
+ return {"bias": self.bias, "decoder_bias": self.decoder_bias}
572
+
573
+ def set_bias(self, value: tf.Variable):
574
+ self.bias = value["bias"]
575
+ self.decoder_bias = value["decoder_bias"]
576
+ self.config.vocab_size = shape_list(value["bias"])[0]
577
+
578
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
579
+ hidden_states = self.dense(inputs=hidden_states)
580
+ hidden_states = self.activation(hidden_states)
581
+ hidden_states = self.LayerNorm(inputs=hidden_states)
582
+ seq_length = shape_list(tensor=hidden_states)[1]
583
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
584
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
585
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
586
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
587
+
588
+ return hidden_states
589
+
590
+
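Because the decoder reuses the input embedding matrix, `TFAlbertMLMHead.call` amounts to a transposed embedding lookup plus a bias. A sketch of that projection with illustrative shapes:

```python
import tensorflow as tf

batch, seq_len, embedding_size, vocab_size = 2, 4, 128, 30000
hidden_states = tf.random.normal((batch, seq_len, embedding_size))
embedding_matrix = tf.random.normal((vocab_size, embedding_size))  # shared with the input embeddings
decoder_bias = tf.zeros((vocab_size,))

# Flatten, project back onto the vocabulary with the transposed embedding matrix,
# then restore the sequence dimension and add the output-only bias.
flat = tf.reshape(hidden_states, (-1, embedding_size))
logits = tf.matmul(flat, embedding_matrix, transpose_b=True)
logits = tf.reshape(logits, (-1, seq_len, vocab_size)) + decoder_bias
assert logits.shape == (batch, seq_len, vocab_size)
```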
591
+ @keras_serializable
592
+ class TFAlbertMainLayer(keras.layers.Layer):
593
+ config_class = AlbertConfig
594
+
595
+ def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs):
596
+ super().__init__(**kwargs)
597
+
598
+ self.config = config
599
+
600
+ self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
601
+ self.encoder = TFAlbertTransformer(config, name="encoder")
602
+ self.pooler = (
603
+ keras.layers.Dense(
604
+ units=config.hidden_size,
605
+ kernel_initializer=get_initializer(config.initializer_range),
606
+ activation="tanh",
607
+ name="pooler",
608
+ )
609
+ if add_pooling_layer
610
+ else None
611
+ )
612
+
613
+ def get_input_embeddings(self) -> keras.layers.Layer:
614
+ return self.embeddings
615
+
616
+ def set_input_embeddings(self, value: tf.Variable):
617
+ self.embeddings.weight = value
618
+ self.embeddings.vocab_size = shape_list(value)[0]
619
+
620
+ def _prune_heads(self, heads_to_prune):
621
+ """
622
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
623
+ class PreTrainedModel
624
+ """
625
+ raise NotImplementedError
626
+
627
+ @unpack_inputs
628
+ def call(
629
+ self,
630
+ input_ids: TFModelInputType | None = None,
631
+ attention_mask: np.ndarray | tf.Tensor | None = None,
632
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
633
+ position_ids: np.ndarray | tf.Tensor | None = None,
634
+ head_mask: np.ndarray | tf.Tensor | None = None,
635
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
636
+ output_attentions: Optional[bool] = None,
637
+ output_hidden_states: Optional[bool] = None,
638
+ return_dict: Optional[bool] = None,
639
+ training: bool = False,
640
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
641
+ if input_ids is not None and inputs_embeds is not None:
642
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
643
+ elif input_ids is not None:
644
+ input_shape = shape_list(input_ids)
645
+ elif inputs_embeds is not None:
646
+ input_shape = shape_list(inputs_embeds)[:-1]
647
+ else:
648
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
649
+
650
+ if attention_mask is None:
651
+ attention_mask = tf.fill(dims=input_shape, value=1)
652
+
653
+ if token_type_ids is None:
654
+ token_type_ids = tf.fill(dims=input_shape, value=0)
655
+
656
+ embedding_output = self.embeddings(
657
+ input_ids=input_ids,
658
+ position_ids=position_ids,
659
+ token_type_ids=token_type_ids,
660
+ inputs_embeds=inputs_embeds,
661
+ training=training,
662
+ )
663
+
664
+ # We create a 3D attention mask from a 2D tensor mask.
665
+ # Sizes are [batch_size, 1, 1, to_seq_length]
666
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
667
+ # this attention mask is simpler than the triangular masking of causal attention
668
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
669
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
670
+
671
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
672
+ # masked positions, this operation will create a tensor which is 0.0 for
673
+ # positions we want to attend and -10000.0 for masked positions.
674
+ # Since we are adding it to the raw scores before the softmax, this is
675
+ # effectively the same as removing these entirely.
676
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
677
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
678
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
679
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
680
+
681
+ # Prepare head mask if needed
682
+ # 1.0 in head_mask indicate we keep the head
683
+ # attention_probs has shape bsz x n_heads x N x N
684
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
685
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
686
+ if head_mask is not None:
687
+ raise NotImplementedError
688
+ else:
689
+ head_mask = [None] * self.config.num_hidden_layers
690
+
691
+ encoder_outputs = self.encoder(
692
+ hidden_states=embedding_output,
693
+ attention_mask=extended_attention_mask,
694
+ head_mask=head_mask,
695
+ output_attentions=output_attentions,
696
+ output_hidden_states=output_hidden_states,
697
+ return_dict=return_dict,
698
+ training=training,
699
+ )
700
+
701
+ sequence_output = encoder_outputs[0]
702
+ pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None
703
+
704
+ if not return_dict:
705
+ return (
706
+ sequence_output,
707
+ pooled_output,
708
+ ) + encoder_outputs[1:]
709
+
710
+ return TFBaseModelOutputWithPooling(
711
+ last_hidden_state=sequence_output,
712
+ pooler_output=pooled_output,
713
+ hidden_states=encoder_outputs.hidden_states,
714
+ attentions=encoder_outputs.attentions,
715
+ )
716
+
717
+ def build(self, input_shape=None):
718
+ if self.built:
719
+ return
720
+ self.built = True
721
+ if getattr(self, "embeddings", None) is not None:
722
+ with tf.name_scope(self.embeddings.name):
723
+ self.embeddings.build(None)
724
+ if getattr(self, "encoder", None) is not None:
725
+ with tf.name_scope(self.encoder.name):
726
+ self.encoder.build(None)
727
+ if getattr(self, "pooler", None) is not None:
728
+ with tf.name_scope(self.pooler.name):
729
+ self.pooler.build([None, None, self.config.hidden_size])
730
+
731
+
732
+ @dataclass
733
+ class TFAlbertForPreTrainingOutput(ModelOutput):
734
+ """
735
+ Output type of [`TFAlbertForPreTraining`].
736
+
737
+ Args:
738
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
739
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
740
+ sop_logits (`tf.Tensor` of shape `(batch_size, 2)`):
741
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
742
+ before SoftMax).
743
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
744
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
745
+ `(batch_size, sequence_length, hidden_size)`.
746
+
747
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
748
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
749
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
750
+ sequence_length)`.
751
+
752
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
753
+ heads.
754
+ """
755
+
756
+ loss: tf.Tensor = None
757
+ prediction_logits: tf.Tensor = None
758
+ sop_logits: tf.Tensor = None
759
+ hidden_states: Tuple[tf.Tensor] | None = None
760
+ attentions: Tuple[tf.Tensor] | None = None
761
+
762
+
763
+ ALBERT_START_DOCSTRING = r"""
764
+
765
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
766
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
767
+ etc.)
768
+
769
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
770
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
771
+ behavior.
772
+
773
+ <Tip>
774
+
775
+ TensorFlow models and layers in `transformers` accept two formats as input:
776
+
777
+ - having all inputs as keyword arguments (like PyTorch models), or
778
+ - having all inputs as a list, tuple or dict in the first positional argument.
779
+
780
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
781
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
782
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
783
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
784
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
785
+ positional argument:
786
+
787
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
788
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
789
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
790
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
791
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
792
+
793
+ Note that when creating models and layers with
794
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
795
+ about any of this, as you can just pass inputs like you would to any other Python function!
796
+
797
+ </Tip>
798
+
799
+ Args:
800
+ config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.
801
+ Initializing with a config file does not load the weights associated with the model, only the
802
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
803
+ """
804
+
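As a hedged illustration of the three input formats described in the tip above (using the checkpoint referenced throughout this file):

```python
from transformers import AutoTokenizer, TFAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertModel.from_pretrained("albert/albert-base-v2")
enc = tokenizer("Hello, my dog is cute", return_tensors="tf")

out1 = model(enc["input_ids"])                                   # a single tensor
out2 = model([enc["input_ids"], enc["attention_mask"]])          # a list, in docstring order
out3 = model({"input_ids": enc["input_ids"],
              "attention_mask": enc["attention_mask"]})          # a dict keyed by input names
```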
805
+ ALBERT_INPUTS_DOCSTRING = r"""
806
+ Args:
807
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
808
+ Indices of input sequence tokens in the vocabulary.
809
+
810
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
811
+ [`PreTrainedTokenizer.encode`] for details.
812
+
813
+ [What are input IDs?](../glossary#input-ids)
814
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
815
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
816
+
817
+ - 1 for tokens that are **not masked**,
818
+ - 0 for tokens that are **masked**.
819
+
820
+ [What are attention masks?](../glossary#attention-mask)
821
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
822
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
823
+ 1]`:
824
+
825
+ - 0 corresponds to a *sentence A* token,
826
+ - 1 corresponds to a *sentence B* token.
827
+
828
+ [What are token type IDs?](../glossary#token-type-ids)
829
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
830
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
831
+ config.max_position_embeddings - 1]`.
832
+
833
+ [What are position IDs?](../glossary#position-ids)
834
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
835
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
836
+
837
+ - 1 indicates the head is **not masked**,
838
+ - 0 indicates the head is **masked**.
839
+
840
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
841
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
842
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
843
+ model's internal embedding lookup matrix.
844
+ output_attentions (`bool`, *optional*):
845
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
846
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
847
+ config will be used instead.
848
+ output_hidden_states (`bool`, *optional*):
849
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
850
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
851
+ used instead.
852
+ return_dict (`bool`, *optional*):
853
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
854
+ eager mode, in graph mode the value will always be set to True.
855
+ training (`bool`, *optional*, defaults to `False`):
856
+ Whether or not to use the model in training mode (some modules like dropout modules have different
857
+ behaviors between training and evaluation).
858
+ """
859
+
860
+
861
+ @add_start_docstrings(
862
+ "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
863
+ ALBERT_START_DOCSTRING,
864
+ )
865
+ class TFAlbertModel(TFAlbertPreTrainedModel):
866
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
867
+ super().__init__(config, *inputs, **kwargs)
868
+
869
+ self.albert = TFAlbertMainLayer(config, name="albert")
870
+
871
+ @unpack_inputs
872
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
873
+ @add_code_sample_docstrings(
874
+ checkpoint=_CHECKPOINT_FOR_DOC,
875
+ output_type=TFBaseModelOutputWithPooling,
876
+ config_class=_CONFIG_FOR_DOC,
877
+ )
878
+ def call(
879
+ self,
880
+ input_ids: TFModelInputType | None = None,
881
+ attention_mask: np.ndarray | tf.Tensor | None = None,
882
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
883
+ position_ids: np.ndarray | tf.Tensor | None = None,
884
+ head_mask: np.ndarray | tf.Tensor | None = None,
885
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
886
+ output_attentions: Optional[bool] = None,
887
+ output_hidden_states: Optional[bool] = None,
888
+ return_dict: Optional[bool] = None,
889
+ training: Optional[bool] = False,
890
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
891
+ outputs = self.albert(
892
+ input_ids=input_ids,
893
+ attention_mask=attention_mask,
894
+ token_type_ids=token_type_ids,
895
+ position_ids=position_ids,
896
+ head_mask=head_mask,
897
+ inputs_embeds=inputs_embeds,
898
+ output_attentions=output_attentions,
899
+ output_hidden_states=output_hidden_states,
900
+ return_dict=return_dict,
901
+ training=training,
902
+ )
903
+
904
+ return outputs
905
+
906
+ def build(self, input_shape=None):
907
+ if self.built:
908
+ return
909
+ self.built = True
910
+ if getattr(self, "albert", None) is not None:
911
+ with tf.name_scope(self.albert.name):
912
+ self.albert.build(None)
913
+
914
+
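A usage sketch for the bare encoder defined above (illustrative, not part of the committed file), assuming the `albert/albert-base-v2` checkpoint:

```python
from transformers import AutoTokenizer, TFAlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertModel.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs)

sequence_output = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
pooled_output = outputs.pooler_output        # (batch_size, hidden_size)
```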
915
+ @add_start_docstrings(
916
+ """
917
+ Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order
918
+ prediction` (classification) head.
919
+ """,
920
+ ALBERT_START_DOCSTRING,
921
+ )
922
+ class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
923
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
924
+ _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
925
+
926
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
927
+ super().__init__(config, *inputs, **kwargs)
928
+
929
+ self.num_labels = config.num_labels
930
+
931
+ self.albert = TFAlbertMainLayer(config, name="albert")
932
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
933
+ self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier")
934
+
935
+ def get_lm_head(self) -> keras.layers.Layer:
936
+ return self.predictions
937
+
938
+ @unpack_inputs
939
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
940
+ @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
941
+ def call(
942
+ self,
943
+ input_ids: TFModelInputType | None = None,
944
+ attention_mask: np.ndarray | tf.Tensor | None = None,
945
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
946
+ position_ids: np.ndarray | tf.Tensor | None = None,
947
+ head_mask: np.ndarray | tf.Tensor | None = None,
948
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
949
+ output_attentions: Optional[bool] = None,
950
+ output_hidden_states: Optional[bool] = None,
951
+ return_dict: Optional[bool] = None,
952
+ labels: np.ndarray | tf.Tensor | None = None,
953
+ sentence_order_label: np.ndarray | tf.Tensor | None = None,
954
+ training: Optional[bool] = False,
955
+ ) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]:
956
+ r"""
957
+ Returns:
958
+
959
+ Example:
960
+
961
+ ```python
962
+ >>> import tensorflow as tf
963
+ >>> from transformers import AutoTokenizer, TFAlbertForPreTraining
964
+
965
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
966
+ >>> model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2")
967
+
968
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]
969
+ >>> # Batch size 1
970
+ >>> outputs = model(input_ids)
971
+
972
+ >>> prediction_logits = outputs.prediction_logits
973
+ >>> sop_logits = outputs.sop_logits
974
+ ```"""
975
+
976
+ outputs = self.albert(
977
+ input_ids=input_ids,
978
+ attention_mask=attention_mask,
979
+ token_type_ids=token_type_ids,
980
+ position_ids=position_ids,
981
+ head_mask=head_mask,
982
+ inputs_embeds=inputs_embeds,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ training=training,
987
+ )
988
+ sequence_output, pooled_output = outputs[:2]
989
+ prediction_scores = self.predictions(hidden_states=sequence_output)
990
+ sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training)
991
+ total_loss = None
992
+
993
+ if labels is not None and sentence_order_label is not None:
994
+ d_labels = {"labels": labels}
995
+ d_labels["sentence_order_label"] = sentence_order_label
996
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores))
997
+
998
+ if not return_dict:
999
+ output = (prediction_scores, sop_scores) + outputs[2:]
1000
+ return ((total_loss,) + output) if total_loss is not None else output
1001
+
1002
+ return TFAlbertForPreTrainingOutput(
1003
+ loss=total_loss,
1004
+ prediction_logits=prediction_scores,
1005
+ sop_logits=sop_scores,
1006
+ hidden_states=outputs.hidden_states,
1007
+ attentions=outputs.attentions,
1008
+ )
1009
+
1010
+ def build(self, input_shape=None):
1011
+ if self.built:
1012
+ return
1013
+ self.built = True
1014
+ if getattr(self, "albert", None) is not None:
1015
+ with tf.name_scope(self.albert.name):
1016
+ self.albert.build(None)
1017
+ if getattr(self, "predictions", None) is not None:
1018
+ with tf.name_scope(self.predictions.name):
1019
+ self.predictions.build(None)
1020
+ if getattr(self, "sop_classifier", None) is not None:
1021
+ with tf.name_scope(self.sop_classifier.name):
1022
+ self.sop_classifier.build(None)
1023
+
1024
+
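A rough sketch of the joint pretraining loss path shown above (MLM labels plus `sentence_order_label`); the unmasked labels here are only a shape demo, not a realistic MLM setup:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForPreTraining.from_pretrained("albert/albert-base-v2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
labels = inputs["input_ids"]             # MLM labels; positions set to -100 would be ignored
sentence_order_label = tf.constant([0])  # 0 = segments are in the original order

outputs = model(**inputs, labels=labels, sentence_order_label=sentence_order_label)
print(outputs.loss)  # combined MLM + SOP loss
```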
1025
+ class TFAlbertSOPHead(keras.layers.Layer):
1026
+ def __init__(self, config: AlbertConfig, **kwargs):
1027
+ super().__init__(**kwargs)
1028
+
1029
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1030
+ self.classifier = keras.layers.Dense(
1031
+ units=config.num_labels,
1032
+ kernel_initializer=get_initializer(config.initializer_range),
1033
+ name="classifier",
1034
+ )
1035
+ self.config = config
1036
+
1037
+ def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
1038
+ dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
1039
+ logits = self.classifier(inputs=dropout_pooled_output)
1040
+
1041
+ return logits
1042
+
1043
+ def build(self, input_shape=None):
1044
+ if self.built:
1045
+ return
1046
+ self.built = True
1047
+ if getattr(self, "classifier", None) is not None:
1048
+ with tf.name_scope(self.classifier.name):
1049
+ self.classifier.build([None, None, self.config.hidden_size])
1050
+
1051
+
1052
+ @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING)
1053
+ class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
1054
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1055
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
1056
+
1057
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1058
+ super().__init__(config, *inputs, **kwargs)
1059
+
1060
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1061
+ self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
1062
+
1063
+ def get_lm_head(self) -> keras.layers.Layer:
1064
+ return self.predictions
1065
+
1066
+ @unpack_inputs
1067
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1068
+ @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
1069
+ def call(
1070
+ self,
1071
+ input_ids: TFModelInputType | None = None,
1072
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1073
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1074
+ position_ids: np.ndarray | tf.Tensor | None = None,
1075
+ head_mask: np.ndarray | tf.Tensor | None = None,
1076
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ labels: np.ndarray | tf.Tensor | None = None,
1081
+ training: Optional[bool] = False,
1082
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1083
+ r"""
1084
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1085
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1086
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1087
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1088
+
1089
+ Returns:
1090
+
1091
+ Example:
1092
+
1093
+ ```python
1094
+ >>> import tensorflow as tf
1095
+ >>> from transformers import AutoTokenizer, TFAlbertForMaskedLM
1096
+
1097
+ >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
1098
+ >>> model = TFAlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
1099
+
1100
+ >>> # add mask_token
1101
+ >>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="tf")
1102
+ >>> logits = model(**inputs).logits
1103
+
1104
+ >>> # retrieve index of [MASK]
1105
+ >>> mask_token_index = tf.where(inputs.input_ids == tokenizer.mask_token_id)[0][1]
1106
+ >>> predicted_token_id = tf.math.argmax(logits[0, mask_token_index], axis=-1)
1107
+ >>> tokenizer.decode(predicted_token_id)
1108
+ 'france'
1109
+ ```
1110
+
1111
+ ```python
1112
+ >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
1113
+ >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
1114
+ >>> outputs = model(**inputs, labels=labels)
1115
+ >>> round(float(outputs.loss), 2)
1116
+ 0.81
1117
+ ```
1118
+ """
1119
+ outputs = self.albert(
1120
+ input_ids=input_ids,
1121
+ attention_mask=attention_mask,
1122
+ token_type_ids=token_type_ids,
1123
+ position_ids=position_ids,
1124
+ head_mask=head_mask,
1125
+ inputs_embeds=inputs_embeds,
1126
+ output_attentions=output_attentions,
1127
+ output_hidden_states=output_hidden_states,
1128
+ return_dict=return_dict,
1129
+ training=training,
1130
+ )
1131
+ sequence_output = outputs[0]
1132
+ prediction_scores = self.predictions(hidden_states=sequence_output, training=training)
1133
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
1134
+
1135
+ if not return_dict:
1136
+ output = (prediction_scores,) + outputs[2:]
1137
+
1138
+ return ((loss,) + output) if loss is not None else output
1139
+
1140
+ return TFMaskedLMOutput(
1141
+ loss=loss,
1142
+ logits=prediction_scores,
1143
+ hidden_states=outputs.hidden_states,
1144
+ attentions=outputs.attentions,
1145
+ )
1146
+
1147
+ def build(self, input_shape=None):
1148
+ if self.built:
1149
+ return
1150
+ self.built = True
1151
+ if getattr(self, "albert", None) is not None:
1152
+ with tf.name_scope(self.albert.name):
1153
+ self.albert.build(None)
1154
+ if getattr(self, "predictions", None) is not None:
1155
+ with tf.name_scope(self.predictions.name):
1156
+ self.predictions.build(None)
1157
+
1158
+
1159
+ @add_start_docstrings(
1160
+ """
1161
+ Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1162
+ output) e.g. for GLUE tasks.
1163
+ """,
1164
+ ALBERT_START_DOCSTRING,
1165
+ )
1166
+ class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
1167
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1168
+ _keys_to_ignore_on_load_unexpected = [r"predictions"]
1169
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1170
+
1171
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1172
+ super().__init__(config, *inputs, **kwargs)
1173
+
1174
+ self.num_labels = config.num_labels
1175
+
1176
+ self.albert = TFAlbertMainLayer(config, name="albert")
1177
+ self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob)
1178
+ self.classifier = keras.layers.Dense(
1179
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1180
+ )
1181
+ self.config = config
1182
+
1183
+ @unpack_inputs
1184
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1185
+ @add_code_sample_docstrings(
1186
+ checkpoint="vumichien/albert-base-v2-imdb",
1187
+ output_type=TFSequenceClassifierOutput,
1188
+ config_class=_CONFIG_FOR_DOC,
1189
+ expected_output="'LABEL_1'",
1190
+ expected_loss=0.12,
1191
+ )
1192
+ def call(
1193
+ self,
1194
+ input_ids: TFModelInputType | None = None,
1195
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1196
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1197
+ position_ids: np.ndarray | tf.Tensor | None = None,
1198
+ head_mask: np.ndarray | tf.Tensor | None = None,
1199
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1200
+ output_attentions: Optional[bool] = None,
1201
+ output_hidden_states: Optional[bool] = None,
1202
+ return_dict: Optional[bool] = None,
1203
+ labels: np.ndarray | tf.Tensor | None = None,
1204
+ training: Optional[bool] = False,
1205
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1206
+ r"""
1207
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1208
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1209
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1210
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1211
+ """
1212
+ outputs = self.albert(
1213
+ input_ids=input_ids,
1214
+ attention_mask=attention_mask,
1215
+ token_type_ids=token_type_ids,
1216
+ position_ids=position_ids,
1217
+ head_mask=head_mask,
1218
+ inputs_embeds=inputs_embeds,
1219
+ output_attentions=output_attentions,
1220
+ output_hidden_states=output_hidden_states,
1221
+ return_dict=return_dict,
1222
+ training=training,
1223
+ )
1224
+ pooled_output = outputs[1]
1225
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1226
+ logits = self.classifier(inputs=pooled_output)
1227
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1228
+
1229
+ if not return_dict:
1230
+ output = (logits,) + outputs[2:]
1231
+
1232
+ return ((loss,) + output) if loss is not None else output
1233
+
1234
+ return TFSequenceClassifierOutput(
1235
+ loss=loss,
1236
+ logits=logits,
1237
+ hidden_states=outputs.hidden_states,
1238
+ attentions=outputs.attentions,
1239
+ )
1240
+
1241
+ def build(self, input_shape=None):
1242
+ if self.built:
1243
+ return
1244
+ self.built = True
1245
+ if getattr(self, "albert", None) is not None:
1246
+ with tf.name_scope(self.albert.name):
1247
+ self.albert.build(None)
1248
+ if getattr(self, "classifier", None) is not None:
1249
+ with tf.name_scope(self.classifier.name):
1250
+ self.classifier.build([None, None, self.config.hidden_size])
1251
+
1252
+
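A classification sketch using the fine-tuned IMDB checkpoint referenced in the code-sample decorator above (illustrative, not part of the committed file):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("vumichien/albert-base-v2-imdb")
model = TFAlbertForSequenceClassification.from_pretrained("vumichien/albert-base-v2-imdb")

inputs = tokenizer("A genuinely moving film.", return_tensors="tf")
logits = model(**inputs).logits

predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class_id])  # e.g. 'LABEL_1' for a positive review
```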
1253
+ @add_start_docstrings(
1254
+ """
1255
+ Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1256
+ Named-Entity-Recognition (NER) tasks.
1257
+ """,
1258
+ ALBERT_START_DOCSTRING,
1259
+ )
1260
+ class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
1261
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1262
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1263
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1264
+
1265
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1266
+ super().__init__(config, *inputs, **kwargs)
1267
+
1268
+ self.num_labels = config.num_labels
1269
+
1270
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1271
+ classifier_dropout_prob = (
1272
+ config.classifier_dropout_prob
1273
+ if config.classifier_dropout_prob is not None
1274
+ else config.hidden_dropout_prob
1275
+ )
1276
+ self.dropout = keras.layers.Dropout(rate=classifier_dropout_prob)
1277
+ self.classifier = keras.layers.Dense(
1278
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1279
+ )
1280
+ self.config = config
1281
+
1282
+ @unpack_inputs
1283
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1284
+ @add_code_sample_docstrings(
1285
+ checkpoint=_CHECKPOINT_FOR_DOC,
1286
+ output_type=TFTokenClassifierOutput,
1287
+ config_class=_CONFIG_FOR_DOC,
1288
+ )
1289
+ def call(
1290
+ self,
1291
+ input_ids: TFModelInputType | None = None,
1292
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1293
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1294
+ position_ids: np.ndarray | tf.Tensor | None = None,
1295
+ head_mask: np.ndarray | tf.Tensor | None = None,
1296
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1297
+ output_attentions: Optional[bool] = None,
1298
+ output_hidden_states: Optional[bool] = None,
1299
+ return_dict: Optional[bool] = None,
1300
+ labels: np.ndarray | tf.Tensor | None = None,
1301
+ training: Optional[bool] = False,
1302
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1303
+ r"""
1304
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1305
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1306
+ """
1307
+ outputs = self.albert(
1308
+ input_ids=input_ids,
1309
+ attention_mask=attention_mask,
1310
+ token_type_ids=token_type_ids,
1311
+ position_ids=position_ids,
1312
+ head_mask=head_mask,
1313
+ inputs_embeds=inputs_embeds,
1314
+ output_attentions=output_attentions,
1315
+ output_hidden_states=output_hidden_states,
1316
+ return_dict=return_dict,
1317
+ training=training,
1318
+ )
1319
+ sequence_output = outputs[0]
1320
+ sequence_output = self.dropout(inputs=sequence_output, training=training)
1321
+ logits = self.classifier(inputs=sequence_output)
1322
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
1323
+
1324
+ if not return_dict:
1325
+ output = (logits,) + outputs[2:]
1326
+
1327
+ return ((loss,) + output) if loss is not None else output
1328
+
1329
+ return TFTokenClassifierOutput(
1330
+ loss=loss,
1331
+ logits=logits,
1332
+ hidden_states=outputs.hidden_states,
1333
+ attentions=outputs.attentions,
1334
+ )
1335
+
1336
+ def build(self, input_shape=None):
1337
+ if self.built:
1338
+ return
1339
+ self.built = True
1340
+ if getattr(self, "albert", None) is not None:
1341
+ with tf.name_scope(self.albert.name):
1342
+ self.albert.build(None)
1343
+ if getattr(self, "classifier", None) is not None:
1344
+ with tf.name_scope(self.classifier.name):
1345
+ self.classifier.build([None, None, self.config.hidden_size])
1346
+
1347
+
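A token-classification sketch; loading the base checkpoint leaves the classification head randomly initialized, so in practice a NER-fine-tuned checkpoint would be substituted:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForTokenClassification.from_pretrained("albert/albert-base-v2", num_labels=9)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits               # (batch_size, sequence_length, num_labels)
predicted_label_ids = tf.math.argmax(logits, axis=-1)
```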
1348
+ @add_start_docstrings(
1349
+ """
1350
+ Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1351
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1352
+ """,
1353
+ ALBERT_START_DOCSTRING,
1354
+ )
1355
+ class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
1356
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1357
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1358
+
1359
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1360
+ super().__init__(config, *inputs, **kwargs)
1361
+
1362
+ self.num_labels = config.num_labels
1363
+
1364
+ self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
1365
+ self.qa_outputs = keras.layers.Dense(
1366
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1367
+ )
1368
+ self.config = config
1369
+
1370
+ @unpack_inputs
1371
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1372
+ @add_code_sample_docstrings(
1373
+ checkpoint="vumichien/albert-base-v2-squad2",
1374
+ output_type=TFQuestionAnsweringModelOutput,
1375
+ config_class=_CONFIG_FOR_DOC,
1376
+ qa_target_start_index=12,
1377
+ qa_target_end_index=13,
1378
+ expected_output="'a nice puppet'",
1379
+ expected_loss=7.36,
1380
+ )
1381
+ def call(
1382
+ self,
1383
+ input_ids: TFModelInputType | None = None,
1384
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1385
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1386
+ position_ids: np.ndarray | tf.Tensor | None = None,
1387
+ head_mask: np.ndarray | tf.Tensor | None = None,
1388
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1389
+ output_attentions: Optional[bool] = None,
1390
+ output_hidden_states: Optional[bool] = None,
1391
+ return_dict: Optional[bool] = None,
1392
+ start_positions: np.ndarray | tf.Tensor | None = None,
1393
+ end_positions: np.ndarray | tf.Tensor | None = None,
1394
+ training: Optional[bool] = False,
1395
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1396
+ r"""
1397
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1398
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1399
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1400
+ are not taken into account for computing the loss.
1401
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1402
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1403
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1404
+ are not taken into account for computing the loss.
1405
+ """
1406
+ outputs = self.albert(
1407
+ input_ids=input_ids,
1408
+ attention_mask=attention_mask,
1409
+ token_type_ids=token_type_ids,
1410
+ position_ids=position_ids,
1411
+ head_mask=head_mask,
1412
+ inputs_embeds=inputs_embeds,
1413
+ output_attentions=output_attentions,
1414
+ output_hidden_states=output_hidden_states,
1415
+ return_dict=return_dict,
1416
+ training=training,
1417
+ )
1418
+ sequence_output = outputs[0]
1419
+ logits = self.qa_outputs(inputs=sequence_output)
1420
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
1421
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
1422
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
1423
+ loss = None
1424
+
1425
+ if start_positions is not None and end_positions is not None:
1426
+ labels = {"start_position": start_positions}
1427
+ labels["end_position"] = end_positions
1428
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
1429
+
1430
+ if not return_dict:
1431
+ output = (start_logits, end_logits) + outputs[2:]
1432
+
1433
+ return ((loss,) + output) if loss is not None else output
1434
+
1435
+ return TFQuestionAnsweringModelOutput(
1436
+ loss=loss,
1437
+ start_logits=start_logits,
1438
+ end_logits=end_logits,
1439
+ hidden_states=outputs.hidden_states,
1440
+ attentions=outputs.attentions,
1441
+ )
1442
+
1443
+ def build(self, input_shape=None):
1444
+ if self.built:
1445
+ return
1446
+ self.built = True
1447
+ if getattr(self, "albert", None) is not None:
1448
+ with tf.name_scope(self.albert.name):
1449
+ self.albert.build(None)
1450
+ if getattr(self, "qa_outputs", None) is not None:
1451
+ with tf.name_scope(self.qa_outputs.name):
1452
+ self.qa_outputs.build([None, None, self.config.hidden_size])
1453
+
1454
+
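An extractive-QA sketch using the SQuAD2 checkpoint referenced in the code-sample decorator above; the question/context pair mirrors the decorator's expected output:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("vumichien/albert-base-v2-squad2")
model = TFAlbertForQuestionAnswering.from_pretrained("vumichien/albert-base-v2-squad2")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))  # expected: 'a nice puppet'
```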
1455
+ @add_start_docstrings(
1456
+ """
1457
+ Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1458
+ softmax) e.g. for RocStories/SWAG tasks.
1459
+ """,
1460
+ ALBERT_START_DOCSTRING,
1461
+ )
1462
+ class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
1463
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
1464
+ _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
1465
+ _keys_to_ignore_on_load_missing = [r"dropout"]
1466
+
1467
+ def __init__(self, config: AlbertConfig, *inputs, **kwargs):
1468
+ super().__init__(config, *inputs, **kwargs)
1469
+
1470
+ self.albert = TFAlbertMainLayer(config, name="albert")
1471
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
1472
+ self.classifier = keras.layers.Dense(
1473
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1474
+ )
1475
+ self.config = config
1476
+
1477
+ @unpack_inputs
1478
+ @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1479
+ @add_code_sample_docstrings(
1480
+ checkpoint=_CHECKPOINT_FOR_DOC,
1481
+ output_type=TFMultipleChoiceModelOutput,
1482
+ config_class=_CONFIG_FOR_DOC,
1483
+ )
1484
+ def call(
1485
+ self,
1486
+ input_ids: TFModelInputType | None = None,
1487
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1488
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1489
+ position_ids: np.ndarray | tf.Tensor | None = None,
1490
+ head_mask: np.ndarray | tf.Tensor | None = None,
1491
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1492
+ output_attentions: Optional[bool] = None,
1493
+ output_hidden_states: Optional[bool] = None,
1494
+ return_dict: Optional[bool] = None,
1495
+ labels: np.ndarray | tf.Tensor | None = None,
1496
+ training: Optional[bool] = False,
1497
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1498
+ r"""
1499
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1500
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1501
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1502
+ """
1503
+
1504
+ if input_ids is not None:
1505
+ num_choices = shape_list(input_ids)[1]
1506
+ seq_length = shape_list(input_ids)[2]
1507
+ else:
1508
+ num_choices = shape_list(inputs_embeds)[1]
1509
+ seq_length = shape_list(inputs_embeds)[2]
1510
+
1511
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1512
+ flat_attention_mask = (
1513
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
1514
+ )
1515
+ flat_token_type_ids = (
1516
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
1517
+ )
1518
+ flat_position_ids = (
1519
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
1520
+ )
1521
+ flat_inputs_embeds = (
1522
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
1523
+ if inputs_embeds is not None
1524
+ else None
1525
+ )
1526
+ outputs = self.albert(
1527
+ input_ids=flat_input_ids,
1528
+ attention_mask=flat_attention_mask,
1529
+ token_type_ids=flat_token_type_ids,
1530
+ position_ids=flat_position_ids,
1531
+ head_mask=head_mask,
1532
+ inputs_embeds=flat_inputs_embeds,
1533
+ output_attentions=output_attentions,
1534
+ output_hidden_states=output_hidden_states,
1535
+ return_dict=return_dict,
1536
+ training=training,
1537
+ )
1538
+ pooled_output = outputs[1]
1539
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
1540
+ logits = self.classifier(inputs=pooled_output)
1541
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
1542
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
1543
+
1544
+ if not return_dict:
1545
+ output = (reshaped_logits,) + outputs[2:]
1546
+ return ((loss,) + output) if loss is not None else output
1547
+
1548
+ return TFMultipleChoiceModelOutput(
1549
+ loss=loss,
1550
+ logits=reshaped_logits,
1551
+ hidden_states=outputs.hidden_states,
1552
+ attentions=outputs.attentions,
1553
+ )
1554
+
1555
+ def build(self, input_shape=None):
1556
+ if self.built:
1557
+ return
1558
+ self.built = True
1559
+ if getattr(self, "albert", None) is not None:
1560
+ with tf.name_scope(self.albert.name):
1561
+ self.albert.build(None)
1562
+ if getattr(self, "classifier", None) is not None:
1563
+ with tf.name_scope(self.classifier.name):
1564
+ self.classifier.build([None, None, self.config.hidden_size])
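A sketch of the `(batch_size, num_choices, sequence_length)` input layout that the flattening and reshaping above expect; the prompt and choices are only example text:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAlbertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = TFAlbertForMultipleChoice.from_pretrained("albert/albert-base-v2")

prompt = "The baguette is a French bread."
choices = ["It is eaten with a fork and a knife.", "It is usually eaten by hand."]

# Encode each (prompt, choice) pair, then add the num_choices axis expected by the model.
encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}

logits = model(**inputs).logits  # (1, num_choices), one score per choice
best_choice = int(tf.math.argmax(logits, axis=-1)[0])
```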
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert.py ADDED
@@ -0,0 +1,346 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for ALBERT model."""
16
+
17
+
18
+ import os
19
+ import unicodedata
20
+ from shutil import copyfile
21
+ from typing import Any, Dict, List, Optional, Tuple
22
+
23
+ import sentencepiece as spm
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
31
+
32
+
33
+ SPIECE_UNDERLINE = "▁"
34
+
35
+
36
+ class AlbertTokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
39
+
40
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
41
+ this superclass for more information regarding those methods.
42
+
43
+ Args:
44
+ vocab_file (`str`):
45
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
46
+ contains the vocabulary necessary to instantiate a tokenizer.
47
+ do_lower_case (`bool`, *optional*, defaults to `True`):
48
+ Whether or not to lowercase the input when tokenizing.
49
+ remove_space (`bool`, *optional*, defaults to `True`):
50
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
51
+ keep_accents (`bool`, *optional*, defaults to `False`):
52
+ Whether or not to keep accents when tokenizing.
53
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
55
+
56
+ <Tip>
57
+
58
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
59
+ sequence. The token used is the `cls_token`.
60
+
61
+ </Tip>
62
+
63
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
64
+ The end of sequence token.
65
+
66
+ <Tip>
67
+
68
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
69
+ The token used is the `sep_token`.
70
+
71
+ </Tip>
72
+
73
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
74
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
75
+ token instead.
76
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
78
+ sequence classification or for a text and a question for question answering. It is also used as the last
79
+ token of a sequence built with special tokens.
80
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
81
+ The token used for padding, for example when batching sequences of different lengths.
82
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
83
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
84
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
85
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
86
+ The token used for masking values. This is the token used when training this model with masked language
87
+ modeling. This is the token which the model will try to predict.
88
+ sp_model_kwargs (`dict`, *optional*):
89
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
90
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
91
+ to set:
92
+
93
+ - `enable_sampling`: Enable subword regularization.
94
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
95
+
96
+ - `nbest_size = {0,1}`: No sampling is performed.
97
+ - `nbest_size > 1`: samples from the nbest_size results.
98
+ - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
99
+ using forward-filtering-and-backward-sampling algorithm.
100
+
101
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
102
+ BPE-dropout.
103
+
104
+ Attributes:
105
+ sp_model (`SentencePieceProcessor`):
106
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
107
+ """
108
+
109
+ vocab_files_names = VOCAB_FILES_NAMES
110
+
111
+ def __init__(
112
+ self,
113
+ vocab_file,
114
+ do_lower_case=True,
115
+ remove_space=True,
116
+ keep_accents=False,
117
+ bos_token="[CLS]",
118
+ eos_token="[SEP]",
119
+ unk_token="<unk>",
120
+ sep_token="[SEP]",
121
+ pad_token="<pad>",
122
+ cls_token="[CLS]",
123
+ mask_token="[MASK]",
124
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
125
+ **kwargs,
126
+ ) -> None:
127
+ # Mask token behave like a normal word, i.e. include the space before it and
128
+ # is included in the raw text, there should be a match in a non-normalized sentence.
129
+ mask_token = (
130
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
131
+ if isinstance(mask_token, str)
132
+ else mask_token
133
+ )
134
+
135
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
136
+
137
+ self.do_lower_case = do_lower_case
138
+ self.remove_space = remove_space
139
+ self.keep_accents = keep_accents
140
+ self.vocab_file = vocab_file
141
+
142
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
143
+ self.sp_model.Load(vocab_file)
144
+
145
+ super().__init__(
146
+ do_lower_case=do_lower_case,
147
+ remove_space=remove_space,
148
+ keep_accents=keep_accents,
149
+ bos_token=bos_token,
150
+ eos_token=eos_token,
151
+ unk_token=unk_token,
152
+ sep_token=sep_token,
153
+ pad_token=pad_token,
154
+ cls_token=cls_token,
155
+ mask_token=mask_token,
156
+ sp_model_kwargs=self.sp_model_kwargs,
157
+ **kwargs,
158
+ )
159
+
160
+ @property
161
+ def vocab_size(self) -> int:
162
+ return len(self.sp_model)
163
+
164
+ def get_vocab(self) -> Dict[str, int]:
165
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
166
+ vocab.update(self.added_tokens_encoder)
167
+ return vocab
168
+
169
+ def __getstate__(self):
170
+ state = self.__dict__.copy()
171
+ state["sp_model"] = None
172
+ return state
173
+
174
+ def __setstate__(self, d):
175
+ self.__dict__ = d
176
+
177
+ # for backward compatibility
178
+ if not hasattr(self, "sp_model_kwargs"):
179
+ self.sp_model_kwargs = {}
180
+
181
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
182
+ self.sp_model.Load(self.vocab_file)
183
+
184
+ def preprocess_text(self, inputs):
185
+ if self.remove_space:
186
+ outputs = " ".join(inputs.strip().split())
187
+ else:
188
+ outputs = inputs
189
+ outputs = outputs.replace("``", '"').replace("''", '"')
190
+
191
+ if not self.keep_accents:
192
+ outputs = unicodedata.normalize("NFKD", outputs)
193
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
194
+ if self.do_lower_case:
195
+ outputs = outputs.lower()
196
+
197
+ return outputs
198
+
199
+ def _tokenize(self, text: str) -> List[str]:
200
+ """Tokenize a string."""
201
+ text = self.preprocess_text(text)
202
+ pieces = self.sp_model.encode(text, out_type=str)
203
+ new_pieces = []
204
+ for piece in pieces:
205
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
206
+ # Logic to handle special cases see https://github.com/google-research/bert/blob/master/README.md#tokenization
207
+ # `9,9` -> ['▁9', ',', '9'] instead of [`_9,`, '9']
208
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
209
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
210
+ if len(cur_pieces[0]) == 1:
211
+ cur_pieces = cur_pieces[1:]
212
+ else:
213
+ cur_pieces[0] = cur_pieces[0][1:]
214
+ cur_pieces.append(piece[-1])
215
+ new_pieces.extend(cur_pieces)
216
+ else:
217
+ new_pieces.append(piece)
218
+
219
+ return new_pieces
220
+
221
+ def _convert_token_to_id(self, token):
222
+ """Converts a token (str) in an id using the vocab."""
223
+ return self.sp_model.PieceToId(token)
224
+
225
+ def _convert_id_to_token(self, index):
226
+ """Converts an index (integer) in a token (str) using the vocab."""
227
+ return self.sp_model.IdToPiece(index)
228
+
229
+ def convert_tokens_to_string(self, tokens):
230
+ """Converts a sequence of tokens (string) in a single string."""
231
+ current_sub_tokens = []
232
+ out_string = ""
233
+ prev_is_special = False
234
+ for token in tokens:
235
+ # make sure that special tokens are not decoded using sentencepiece model
236
+ if token in self.all_special_tokens:
237
+ if not prev_is_special:
238
+ out_string += " "
239
+ out_string += self.sp_model.decode(current_sub_tokens) + token
240
+ prev_is_special = True
241
+ current_sub_tokens = []
242
+ else:
243
+ current_sub_tokens.append(token)
244
+ prev_is_special = False
245
+ out_string += self.sp_model.decode(current_sub_tokens)
246
+ return out_string.strip()
247
+
248
+ def build_inputs_with_special_tokens(
249
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
250
+ ) -> List[int]:
251
+ """
252
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
253
+ adding special tokens. An ALBERT sequence has the following format:
254
+
255
+ - single sequence: `[CLS] X [SEP]`
256
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
257
+
258
+ Args:
259
+ token_ids_0 (`List[int]`):
260
+ List of IDs to which the special tokens will be added.
261
+ token_ids_1 (`List[int]`, *optional*):
262
+ Optional second list of IDs for sequence pairs.
263
+
264
+ Returns:
265
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
266
+ """
267
+ sep = [self.sep_token_id]
268
+ cls = [self.cls_token_id]
269
+ if token_ids_1 is None:
270
+ return cls + token_ids_0 + sep
271
+ return cls + token_ids_0 + sep + token_ids_1 + sep
272
+
273
+ def get_special_tokens_mask(
274
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
275
+ ) -> List[int]:
276
+ """
277
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
278
+ special tokens using the tokenizer `prepare_for_model` method.
279
+
280
+ Args:
281
+ token_ids_0 (`List[int]`):
282
+ List of IDs.
283
+ token_ids_1 (`List[int]`, *optional*):
284
+ Optional second list of IDs for sequence pairs.
285
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
286
+ Whether or not the token list is already formatted with special tokens for the model.
287
+
288
+ Returns:
289
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
290
+ """
291
+
292
+ if already_has_special_tokens:
293
+ return super().get_special_tokens_mask(
294
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
295
+ )
296
+
297
+ if token_ids_1 is not None:
298
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
299
+ return [1] + ([0] * len(token_ids_0)) + [1]
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
306
+ sequence pair mask has the following format:
307
+
308
+ ```
309
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
310
+ | first sequence | second sequence |
311
+ ```
312
+
313
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
314
+
315
+ Args:
316
+ token_ids_0 (`List[int]`):
317
+ List of IDs.
318
+ token_ids_1 (`List[int]`, *optional*):
319
+ Optional second list of IDs for sequence pairs.
320
+
321
+ Returns:
322
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
323
+ """
324
+ sep = [self.sep_token_id]
325
+ cls = [self.cls_token_id]
326
+
327
+ if token_ids_1 is None:
328
+ return len(cls + token_ids_0 + sep) * [0]
329
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
330
+
331
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
332
+ if not os.path.isdir(save_directory):
333
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
334
+ return
335
+ out_vocab_file = os.path.join(
336
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
337
+ )
338
+
339
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
340
+ copyfile(self.vocab_file, out_vocab_file)
341
+ elif not os.path.isfile(self.vocab_file):
342
+ with open(out_vocab_file, "wb") as fi:
343
+ content_spiece_model = self.sp_model.serialized_model_proto()
344
+ fi.write(content_spiece_model)
345
+
346
+ return (out_vocab_file,)
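A usage sketch for the SentencePiece-backed tokenizer above (requires the `sentencepiece` package; exact pieces depend on the pretrained vocabulary):

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")

# Lower-casing, accent stripping and the "9,9" digit/comma special case happen in _tokenize.
print(tokenizer.tokenize("Héllo, 9,9!"))

# Sequence pairs get the [CLS] A [SEP] B [SEP] layout and the 0/1 token type IDs described above.
encoded = tokenizer("Sentence A", "Sentence B")
print(encoded["input_ids"])
print(encoded["token_type_ids"])
```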
llmeval-env/lib/python3.10/site-packages/transformers/models/albert/tokenization_albert_fast.py ADDED
@@ -0,0 +1,210 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for ALBERT model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_albert import AlbertTokenizer
29
+ else:
30
+ AlbertTokenizer = None
31
+
32
+ logger = logging.get_logger(__name__)
33
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
34
+
35
+
36
+ SPIECE_UNDERLINE = "▁"
37
+
38
+
39
+ class AlbertTokenizerFast(PreTrainedTokenizerFast):
40
+ """
41
+ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on
42
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
43
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
44
+ this superclass for more information regarding those methods.
45
+
46
+ Args:
47
+ vocab_file (`str`):
48
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
49
+ contains the vocabulary necessary to instantiate a tokenizer.
50
+ do_lower_case (`bool`, *optional*, defaults to `True`):
51
+ Whether or not to lowercase the input when tokenizing.
52
+ remove_space (`bool`, *optional*, defaults to `True`):
53
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
54
+ keep_accents (`bool`, *optional*, defaults to `False`):
55
+ Whether or not to keep accents when tokenizing.
56
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
57
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
58
+
59
+ <Tip>
60
+
61
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
62
+ sequence. The token used is the `cls_token`.
63
+
64
+ </Tip>
65
+
66
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
67
+ The end of sequence token. When building a sequence using special tokens, this is not the token
68
+ that is used for the end of sequence. The token used is the `sep_token`.
69
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
77
+ The token used for padding, for example when batching sequences of different lengths.
78
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ """
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ slow_tokenizer_class = AlbertTokenizer
88
+
89
+ def __init__(
90
+ self,
91
+ vocab_file=None,
92
+ tokenizer_file=None,
93
+ do_lower_case=True,
94
+ remove_space=True,
95
+ keep_accents=False,
96
+ bos_token="[CLS]",
97
+ eos_token="[SEP]",
98
+ unk_token="<unk>",
99
+ sep_token="[SEP]",
100
+ pad_token="<pad>",
101
+ cls_token="[CLS]",
102
+ mask_token="[MASK]",
103
+ **kwargs,
104
+ ):
105
+ # The mask token behaves like a normal word, i.e. it includes the space before it and
106
+ # is included in the raw text; there should be a match in a non-normalized sentence.
107
+ mask_token = (
108
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
109
+ if isinstance(mask_token, str)
110
+ else mask_token
111
+ )
112
+
113
+ super().__init__(
114
+ vocab_file,
115
+ tokenizer_file=tokenizer_file,
116
+ do_lower_case=do_lower_case,
117
+ remove_space=remove_space,
118
+ keep_accents=keep_accents,
119
+ bos_token=bos_token,
120
+ eos_token=eos_token,
121
+ unk_token=unk_token,
122
+ sep_token=sep_token,
123
+ pad_token=pad_token,
124
+ cls_token=cls_token,
125
+ mask_token=mask_token,
126
+ **kwargs,
127
+ )
128
+
129
+ self.do_lower_case = do_lower_case
130
+ self.remove_space = remove_space
131
+ self.keep_accents = keep_accents
132
+ self.vocab_file = vocab_file
133
+
134
+ @property
135
+ def can_save_slow_tokenizer(self) -> bool:
136
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
137
+
138
+ def build_inputs_with_special_tokens(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
143
+ adding special tokens. An ALBERT sequence has the following format:
144
+
145
+ - single sequence: `[CLS] X [SEP]`
146
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
147
+
148
+ Args:
149
+ token_ids_0 (`List[int]`):
150
+ List of IDs to which the special tokens will be added
151
+ token_ids_1 (`List[int]`, *optional*):
152
+ Optional second list of IDs for sequence pairs.
153
+
154
+ Returns:
155
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
156
+ """
157
+ sep = [self.sep_token_id]
158
+ cls = [self.cls_token_id]
159
+ if token_ids_1 is None:
160
+ return cls + token_ids_0 + sep
161
+ return cls + token_ids_0 + sep + token_ids_1 + sep
162
+
163
+ def create_token_type_ids_from_sequences(
164
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
165
+ ) -> List[int]:
166
+ """
167
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
168
+ sequence pair mask has the following format:
169
+
170
+ ```
171
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
172
+ | first sequence | second sequence |
173
+ ```
174
+
175
+ if token_ids_1 is None, only returns the first portion of the mask (0s).
176
+
177
+ Args:
178
+ token_ids_0 (`List[int]`):
179
+ List of ids.
180
+ token_ids_1 (`List[int]`, *optional*):
181
+ Optional second list of IDs for sequence pairs.
182
+
183
+ Returns:
184
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
185
+ """
186
+ sep = [self.sep_token_id]
187
+ cls = [self.cls_token_id]
188
+
189
+ if token_ids_1 is None:
190
+ return len(cls + token_ids_0 + sep) * [0]
191
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
192
+
193
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
194
+ if not self.can_save_slow_tokenizer:
195
+ raise ValueError(
196
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
197
+ "tokenizer."
198
+ )
199
+
200
+ if not os.path.isdir(save_directory):
201
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
202
+ return
203
+ out_vocab_file = os.path.join(
204
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
205
+ )
206
+
207
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
208
+ copyfile(self.vocab_file, out_vocab_file)
209
+
210
+ return (out_vocab_file,)
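A matching sketch for the fast tokenizer; `save_vocabulary` only works when the original `spiece.model` file is available, which `can_save_slow_tokenizer` reports:

```python
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert/albert-base-v2")

encoded = tokenizer("Sentence A", "Sentence B")
print(encoded["token_type_ids"])          # 0s for the first segment, 1s for the second
print(tokenizer.can_save_slow_tokenizer)  # True only if the SentencePiece file was downloaded
```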
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_blip_2": [
21
+ "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Blip2Config",
23
+ "Blip2QFormerConfig",
24
+ "Blip2VisionConfig",
25
+ ],
26
+ "processing_blip_2": ["Blip2Processor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_blip_2"] = [
36
+ "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "Blip2Model",
38
+ "Blip2QFormerModel",
39
+ "Blip2PreTrainedModel",
40
+ "Blip2ForConditionalGeneration",
41
+ "Blip2VisionModel",
42
+ ]
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_blip_2 import (
46
+ BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ Blip2Config,
48
+ Blip2QFormerConfig,
49
+ Blip2VisionConfig,
50
+ )
51
+ from .processing_blip_2 import Blip2Processor
52
+
53
+ try:
54
+ if not is_torch_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .modeling_blip_2 import (
60
+ BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
61
+ Blip2ForConditionalGeneration,
62
+ Blip2Model,
63
+ Blip2PreTrainedModel,
64
+ Blip2QFormerModel,
65
+ Blip2VisionModel,
66
+ )
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
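A sketch of what the lazy-module wiring above enables: the public names resolve through `_LazyModule`, and the torch-backed modeling classes are only pulled in when PyTorch is installed:

```python
from transformers import Blip2Config  # resolved lazily through the _import_structure above

config = Blip2Config()
print(config.vision_config.hidden_size)  # 1408, the Blip2VisionConfig default shown below
```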
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/configuration_blip_2.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/convert_blip_2_original_to_pytorch.cpython-310.pyc ADDED
Binary file (7.97 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/modeling_blip_2.cpython-310.pyc ADDED
Binary file (55 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/__pycache__/processing_blip_2.cpython-310.pyc ADDED
Binary file (4.35 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/configuration_blip_2.py ADDED
@@ -0,0 +1,355 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BLIP-2 model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
22
+ from ...utils import logging
23
+ from ..auto import CONFIG_MAPPING
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class Blip2VisionConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a
35
+ BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the BLIP-2
37
+ [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ hidden_size (`int`, *optional*, defaults to 1408):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ intermediate_size (`int`, *optional*, defaults to 6144):
46
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
47
+ num_hidden_layers (`int`, *optional*, defaults to 39):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ image_size (`int`, *optional*, defaults to 224):
52
+ The size (resolution) of each image.
53
+ patch_size (`int`, *optional*, defaults to 14):
54
+ The size (resolution) of each patch.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the layer normalization layers.
59
+ attention_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio for the attention probabilities.
61
+ initializer_range (`float`, *optional*, defaults to 1e-10):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ qkv_bias (`bool`, *optional*, defaults to `True`):
64
+ Whether to add a bias to the queries and values in the self-attention layers.
65
+
66
+ Example:
67
+
68
+ ```python
69
+ >>> from transformers import Blip2VisionConfig, Blip2VisionModel
70
+
71
+ >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration
72
+ >>> configuration = Blip2VisionConfig()
73
+
74
+ >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
75
+ >>> model = Blip2VisionModel(configuration)
76
+
77
+ >>> # Accessing the model configuration
78
+ >>> configuration = model.config
79
+ ```"""
80
+
81
+ model_type = "blip_2_vision_model"
82
+
83
+ def __init__(
84
+ self,
85
+ hidden_size=1408,
86
+ intermediate_size=6144,
87
+ num_hidden_layers=39,
88
+ num_attention_heads=16,
89
+ image_size=224,
90
+ patch_size=14,
91
+ hidden_act="gelu",
92
+ layer_norm_eps=1e-6,
93
+ attention_dropout=0.0,
94
+ initializer_range=1e-10,
95
+ qkv_bias=True,
96
+ **kwargs,
97
+ ):
98
+ super().__init__(**kwargs)
99
+
100
+ self.hidden_size = hidden_size
101
+ self.intermediate_size = intermediate_size
102
+ self.num_hidden_layers = num_hidden_layers
103
+ self.num_attention_heads = num_attention_heads
104
+ self.patch_size = patch_size
105
+ self.image_size = image_size
106
+ self.initializer_range = initializer_range
107
+ self.attention_dropout = attention_dropout
108
+ self.layer_norm_eps = layer_norm_eps
109
+ self.hidden_act = hidden_act
110
+ self.qkv_bias = qkv_bias
111
+
112
+ @classmethod
113
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
114
+ cls._set_token_in_kwargs(kwargs)
115
+
116
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
117
+
118
+ # get the vision config dict if we are loading from Blip2Config
119
+ if config_dict.get("model_type") == "blip-2":
120
+ config_dict = config_dict["vision_config"]
121
+
122
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
123
+ logger.warning(
124
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
125
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
126
+ )
127
+
128
+ return cls.from_dict(config_dict, **kwargs)
129
+
130
+
131
+ class Blip2QFormerConfig(PretrainedConfig):
132
+ r"""
133
+ This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a
134
+ BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture.
135
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2
136
+ [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects
137
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
138
+ [`PretrainedConfig`] for more information.
139
+
140
+ Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
141
+
142
+ Args:
143
+ vocab_size (`int`, *optional*, defaults to 30522):
144
+ Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
145
+ the `inputs_ids` passed when calling the model.
146
+ hidden_size (`int`, *optional*, defaults to 768):
147
+ Dimensionality of the encoder layers and the pooler layer.
148
+ num_hidden_layers (`int`, *optional*, defaults to 12):
149
+ Number of hidden layers in the Transformer encoder.
150
+ num_attention_heads (`int`, *optional*, defaults to 12):
151
+ Number of attention heads for each attention layer in the Transformer encoder.
152
+ intermediate_size (`int`, *optional*, defaults to 3072):
153
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
154
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
155
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
156
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
157
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
158
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
159
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
160
+ The dropout ratio for the attention probabilities.
161
+ max_position_embeddings (`int`, *optional*, defaults to 512):
162
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
163
+ just in case (e.g., 512 or 1024 or 2048).
164
+ initializer_range (`float`, *optional*, defaults to 0.02):
165
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
166
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
167
+ The epsilon used by the layer normalization layers.
168
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
169
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
170
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
171
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
172
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
173
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
174
+ cross_attention_frequency (`int`, *optional*, defaults to 2):
175
+ The frequency of adding cross-attention to the Transformer layers.
176
+ encoder_hidden_size (`int`, *optional*, defaults to 1408):
177
+ The hidden size of the hidden states for cross-attention.
178
+
179
+ Examples:
180
+
181
+ ```python
182
+ >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel
183
+
184
+ >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration
185
+ >>> configuration = Blip2QFormerConfig()
186
+
187
+ >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
188
+ >>> model = Blip2QFormerModel(configuration)
189
+ >>> # Accessing the model configuration
190
+ >>> configuration = model.config
191
+ ```"""
192
+
193
+ model_type = "blip_2_qformer"
194
+
195
+ def __init__(
196
+ self,
197
+ vocab_size=30522,
198
+ hidden_size=768,
199
+ num_hidden_layers=12,
200
+ num_attention_heads=12,
201
+ intermediate_size=3072,
202
+ hidden_act="gelu",
203
+ hidden_dropout_prob=0.1,
204
+ attention_probs_dropout_prob=0.1,
205
+ max_position_embeddings=512,
206
+ initializer_range=0.02,
207
+ layer_norm_eps=1e-12,
208
+ pad_token_id=0,
209
+ position_embedding_type="absolute",
210
+ cross_attention_frequency=2,
211
+ encoder_hidden_size=1408,
212
+ **kwargs,
213
+ ):
214
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
215
+
216
+ self.vocab_size = vocab_size
217
+ self.hidden_size = hidden_size
218
+ self.num_hidden_layers = num_hidden_layers
219
+ self.num_attention_heads = num_attention_heads
220
+ self.hidden_act = hidden_act
221
+ self.intermediate_size = intermediate_size
222
+ self.hidden_dropout_prob = hidden_dropout_prob
223
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
224
+ self.max_position_embeddings = max_position_embeddings
225
+ self.initializer_range = initializer_range
226
+ self.layer_norm_eps = layer_norm_eps
227
+ self.position_embedding_type = position_embedding_type
228
+ self.cross_attention_frequency = cross_attention_frequency
229
+ self.encoder_hidden_size = encoder_hidden_size
230
+
231
+ @classmethod
232
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
233
+ cls._set_token_in_kwargs(kwargs)
234
+
235
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
236
+
237
+ # get the qformer config dict if we are loading from Blip2Config
238
+ if config_dict.get("model_type") == "blip-2":
239
+ config_dict = config_dict["qformer_config"]
240
+
241
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
242
+ logger.warning(
243
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
244
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
245
+ )
246
+
247
+ return cls.from_dict(config_dict, **kwargs)
248
+
249
+
250
+ class Blip2Config(PretrainedConfig):
251
+ r"""
252
+ [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is
253
+ used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model
254
+ and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to
255
+ that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.
256
+
257
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
258
+ documentation from [`PretrainedConfig`] for more information.
259
+
260
+ Args:
261
+ vision_config (`dict`, *optional*):
262
+ Dictionary of configuration options used to initialize [`Blip2VisionConfig`].
263
+ qformer_config (`dict`, *optional*):
264
+ Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].
265
+ text_config (`dict`, *optional*):
266
+ Dictionary of configuration options used to initialize any [`PretrainedConfig`].
267
+ num_query_tokens (`int`, *optional*, defaults to 32):
268
+ The number of query tokens passed through the Transformer.
269
+
270
+ kwargs (*optional*):
271
+ Dictionary of keyword arguments.
272
+
273
+ Example:
274
+
275
+ ```python
276
+ >>> from transformers import (
277
+ ... Blip2VisionConfig,
278
+ ... Blip2QFormerConfig,
279
+ ... OPTConfig,
280
+ ... Blip2Config,
281
+ ... Blip2ForConditionalGeneration,
282
+ ... )
283
+
284
+ >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration
285
+ >>> configuration = Blip2Config()
286
+
287
+ >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
288
+ >>> model = Blip2ForConditionalGeneration(configuration)
289
+
290
+ >>> # Accessing the model configuration
291
+ >>> configuration = model.config
292
+
293
+ >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig
294
+
295
+ >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations
296
+ >>> vision_config = Blip2VisionConfig()
297
+ >>> qformer_config = Blip2QFormerConfig()
298
+ >>> text_config = OPTConfig()
299
+
300
+ >>> config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
301
+ ```"""
302
+
303
+ model_type = "blip-2"
304
+
305
+ def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
306
+ super().__init__(**kwargs)
307
+
308
+ if vision_config is None:
309
+ vision_config = {}
310
+ logger.info("vision_config is None. Initializing the Blip2VisionConfig with default values.")
311
+
312
+ if qformer_config is None:
313
+ qformer_config = {}
314
+ logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
315
+
316
+ if text_config is None:
317
+ text_config = {}
318
+ logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
319
+
320
+ self.vision_config = Blip2VisionConfig(**vision_config)
321
+ self.qformer_config = Blip2QFormerConfig(**qformer_config)
322
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
323
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
324
+
325
+ self.tie_word_embeddings = self.text_config.tie_word_embeddings
326
+ self.is_encoder_decoder = self.text_config.is_encoder_decoder
327
+
328
+ self.num_query_tokens = num_query_tokens
329
+ self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
330
+ self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
331
+ self.initializer_factor = 1.0
332
+ self.initializer_range = 0.02
333
+
334
+ @classmethod
335
+ def from_vision_qformer_text_configs(
336
+ cls,
337
+ vision_config: Blip2VisionConfig,
338
+ qformer_config: Blip2QFormerConfig,
339
+ text_config: PretrainedConfig,
340
+ **kwargs,
341
+ ):
342
+ r"""
343
+ Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
344
+ configurations.
345
+
346
+ Returns:
347
+ [`Blip2Config`]: An instance of a configuration object
348
+ """
349
+
350
+ return cls(
351
+ vision_config=vision_config.to_dict(),
352
+ qformer_config=qformer_config.to_dict(),
353
+ text_config=text_config.to_dict(),
354
+ **kwargs,
355
+ )
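A short illustrative sketch of composing a `Blip2Config` from its three sub-configurations via the `from_vision_qformer_text_configs` classmethod defined above; this only restates what the file itself implements:

```python
# Illustrative sketch: building a Blip2Config from its sub-configs.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

vision_config = Blip2VisionConfig()
qformer_config = Blip2QFormerConfig()
text_config = OPTConfig()

config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)

# Blip2Config ties the Q-Former's cross-attention width to the vision hidden size.
assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
```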
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py ADDED
@@ -0,0 +1,291 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Convert BLIP-2 checkpoints from the original repository.
17
+
18
+ URL: https://github.com/salesforce/LAVIS/tree/main/projects/blip2
19
+ """
20
+
21
+ import argparse
22
+
23
+ import requests
24
+ import torch
25
+
26
+ # pip3 install salesforce-lavis
27
+ # I'm actually installing a slightly modified version: pip3 install -U git+https://github.com/nielsrogge/LAVIS.git@blip2_float32
28
+ # to make sure we can compare both original and HF implementation in float32
29
+ from lavis.models import load_model_and_preprocess
30
+ from PIL import Image
31
+
32
+ from transformers import (
33
+ AutoTokenizer,
34
+ Blip2Config,
35
+ Blip2ForConditionalGeneration,
36
+ Blip2Processor,
37
+ Blip2VisionConfig,
38
+ BlipImageProcessor,
39
+ OPTConfig,
40
+ T5Config,
41
+ set_seed,
42
+ )
43
+ from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
44
+
45
+
46
+ def load_demo_image():
47
+ url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
48
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
49
+
50
+ return image
51
+
52
+
53
+ # here we list all keys to be renamed (original name on the left, our name on the right)
54
+ def create_rename_keys(config):
55
+ rename_keys = []
56
+ # fmt: off
57
+
58
+ # vision encoder
59
+ rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
60
+ rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
61
+ rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
62
+ rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
63
+ rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
64
+ rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
65
+
66
+ for i in range(config.vision_config.num_hidden_layers):
67
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
68
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
69
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
70
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
71
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
72
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
73
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
74
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
75
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
76
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
77
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
78
+
79
+ # QFormer
80
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
81
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
82
+
83
+ # fmt: on
84
+ return rename_keys
85
+
86
+
87
+ def rename_key(dct, old, new):
88
+ val = dct.pop(old)
89
+ dct[new] = val
90
+
91
+
92
+ def read_in_q_v_bias(state_dict, config):
93
+ for i in range(config.vision_config.num_hidden_layers):
94
+ # read in original q and v biases
95
+ q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
96
+ v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
97
+
98
+ # next, set bias in the state dict
99
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
100
+ state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
101
+
102
+
103
+ def get_blip2_config(model_name, eos_token_id):
104
+ image_size = 364 if "coco" in model_name else 224
105
+ vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
106
+
107
+ # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
108
+ # the flan-T5 checkpoints do not seem to have bos_token_id set properly
109
+ if "opt-2.7b" in model_name:
110
+ text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
111
+ elif "opt-6.7b" in model_name:
112
+ text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
113
+ elif "t5-xl" in model_name:
114
+ text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
115
+ elif "t5-xxl" in model_name:
116
+ text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
117
+
118
+ config = Blip2Config(vision_config=vision_config, text_config=text_config)
119
+
120
+ return config, image_size
121
+
122
+
123
+ @torch.no_grad()
124
+ def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
125
+ """
126
+ Copy/paste/tweak model's weights to Transformers design.
127
+ """
128
+ tokenizer = (
129
+ AutoTokenizer.from_pretrained("facebook/opt-2.7b")
130
+ if "opt" in model_name
131
+ else AutoTokenizer.from_pretrained("google/flan-t5-xl")
132
+ )
133
+ eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
134
+ config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
135
+
136
+ hf_model = Blip2ForConditionalGeneration(config).eval()
137
+
138
+ model_name_to_original = {
139
+ "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
140
+ "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
141
+ "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
142
+ "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
143
+ "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
144
+ "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
145
+ "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
146
+ }
147
+
148
+ name, model_type = model_name_to_original[model_name]
149
+
150
+ # note: this script is tested on 2 GPUs, as the models are compared in float32,
+ # which requires quite some memory. Loading each model on a separate device
+ # makes the comparison easiest.
153
+ hf_model_device = "cuda:0" if torch.cuda.is_available() else "cpu"
154
+ lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
155
+
156
+ # load original model
157
+ print("Loading original model...")
158
+ original_model, vis_processors, _ = load_model_and_preprocess(
159
+ name=name, model_type=model_type, is_eval=True, device=lavis_device
160
+ )
161
+ original_model.eval()
162
+ print("Done!")
163
+
164
+ # update state dict keys
165
+ state_dict = original_model.state_dict()
166
+ rename_keys = create_rename_keys(config)
167
+ for src, dest in rename_keys:
168
+ rename_key(state_dict, src, dest)
169
+
170
+ # some keys can be renamed efficiently
171
+ for key, val in state_dict.copy().items():
172
+ val = state_dict.pop(key)
173
+ if key.startswith("Qformer.bert"):
174
+ key = key.replace("Qformer.bert", "qformer")
175
+ if "attention.self" in key:
176
+ key = key.replace("self", "attention")
177
+ if "opt_proj" in key:
178
+ key = key.replace("opt_proj", "language_projection")
179
+ if "t5_proj" in key:
180
+ key = key.replace("t5_proj", "language_projection")
181
+ if key.startswith("opt"):
182
+ key = key.replace("opt", "language")
183
+ if key.startswith("t5"):
184
+ key = key.replace("t5", "language")
185
+ state_dict[key] = val
186
+
187
+ # read in qv biases
188
+ read_in_q_v_bias(state_dict, config)
189
+
190
+ missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
191
+ assert len(missing_keys) == 0
192
+ assert unexpected_keys == ["qformer.embeddings.position_ids"]
193
+
194
+ image = load_demo_image()
195
+ original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
196
+ input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(hf_model_device)
197
+
198
+ # create processor
199
+ image_processor = BlipImageProcessor(
200
+ size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
201
+ )
202
+ processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
203
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(hf_model_device)
204
+
205
+ # make sure processor creates exact same pixel values
206
+ assert torch.allclose(pixel_values, original_pixel_values.to(pixel_values.device))
207
+
208
+ original_model.to(lavis_device)
209
+ hf_model.to(hf_model_device)
210
+ with torch.no_grad():
211
+ if "opt" in model_name:
212
+ original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
213
+ logits = hf_model(pixel_values, input_ids).logits
214
+ else:
215
+ original_logits = original_model(
216
+ {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
217
+ ).logits
218
+ labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
219
+ logits = hf_model(pixel_values, input_ids, labels=labels).logits
220
+
221
+ assert original_logits.shape == logits.shape
222
+ print("First values of original logits:", original_logits[0, :3, :3])
223
+ print("First values of HF logits:", logits[0, :3, :3])
224
+
225
+ # assert values
226
+ assert torch.allclose(original_logits.to(logits.device), logits, atol=1e-4)
227
+ print("Looks ok!")
228
+
229
+ print("Generating a caption...")
230
+ prompt = "Question: what object is in this image? Answer:"
231
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(hf_model_device)
232
+
233
+ set_seed(42)
234
+
235
+ original_outputs = original_model.generate(
236
+ {"image": original_pixel_values, "prompt": prompt}, use_nucleus_sampling=True
237
+ )
238
+ outputs = hf_model.generate(
239
+ pixel_values,
240
+ input_ids,
241
+ do_sample=True,
242
+ num_beams=5,
243
+ max_length=30,
244
+ min_length=1,
245
+ top_p=0.9,
246
+ repetition_penalty=1.0,
247
+ length_penalty=1.0,
248
+ temperature=1,
249
+ )
250
+ output_text = processor.batch_decode(outputs, skip_special_tokens=True)
251
+ output_text = [text.strip() for text in output_text]
252
+ print("Original generation:", original_outputs)
253
+ print("HF generation:", output_text)
254
+
255
+ if pytorch_dump_folder_path is not None:
256
+ processor.save_pretrained(pytorch_dump_folder_path)
257
+ hf_model.save_pretrained(pytorch_dump_folder_path)
258
+
259
+ if push_to_hub:
260
+ processor.push_to_hub(f"nielsr/{model_name}")
261
+ hf_model.push_to_hub(f"nielsr/{model_name}")
262
+
263
+
264
+ if __name__ == "__main__":
265
+ parser = argparse.ArgumentParser()
266
+ choices = [
267
+ "blip2-opt-2.7b",
268
+ "blip2-opt-6.7b",
269
+ "blip2-opt-2.7b-coco",
270
+ "blip2-opt-6.7b-coco",
271
+ "blip2-flan-t5-xl",
272
+ "blip2-flan-t5-xl-coco",
273
+ "blip2-flan-t5-xxl",
274
+ ]
275
+ parser.add_argument(
276
+ "--model_name",
277
+ default="blip2-opt-2.7b",
278
+ choices=choices,
279
+ type=str,
280
+ help="Path to hf config.json of model to convert",
281
+ )
282
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
283
+ parser.add_argument(
284
+ "--push_to_hub",
285
+ action="store_true",
286
+ help="Whether to push the model and processor to the hub after converting",
287
+ )
288
+
289
+ args = parser.parse_args()
290
+
291
+ convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
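As a hedged usage sketch, the converter can also be called directly from Python rather than through the argparse entry point above. This assumes the `salesforce-lavis` dependency is installed and that there is enough memory to hold both the original and the converted model in float32; the output path below is hypothetical.

```python
# Hypothetical invocation sketch; requires `pip install salesforce-lavis`.
from transformers.models.blip_2.convert_blip_2_original_to_pytorch import convert_blip2_checkpoint

convert_blip2_checkpoint(
    model_name="blip2-opt-2.7b",                     # any name from the `choices` list above
    pytorch_dump_folder_path="./blip2-opt-2.7b-hf",  # hypothetical output directory
    push_to_hub=False,
)
```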
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py ADDED
@@ -0,0 +1,1853 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BLIP-2 model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPooling,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
43
+ from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b"
49
+
50
+
51
+ from ..deprecated._archive_maps import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ @dataclass
55
+ class Blip2ForConditionalGenerationModelOutput(ModelOutput):
56
+ """
57
+ Class defining the outputs of [`Blip2ForConditionalGeneration`].
58
+
59
+ Args:
60
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
61
+ Language modeling loss from the language model.
62
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
63
+ Prediction scores of the language modeling head of the language model.
64
+ vision_outputs (`BaseModelOutputWithPooling`):
65
+ Outputs of the vision encoder.
66
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
67
+ Outputs of the Q-Former (Querying Transformer).
68
+ language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
69
+ Outputs of the language model.
70
+ """
71
+
72
+ loss: Optional[Tuple[torch.FloatTensor]] = None
73
+ logits: Optional[Tuple[torch.FloatTensor]] = None
74
+ vision_outputs: Optional[torch.FloatTensor] = None
75
+ qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
76
+ language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
77
+
78
+ def to_tuple(self) -> Tuple[Any]:
79
+ return tuple(
80
+ self[k]
81
+ if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
82
+ else getattr(self, k).to_tuple()
83
+ for k in self.keys()
84
+ )
85
+
86
+
87
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
88
+ class Blip2VisionEmbeddings(nn.Module):
89
+ def __init__(self, config: Blip2VisionConfig):
90
+ super().__init__()
91
+ self.config = config
92
+ self.embed_dim = config.hidden_size
93
+ self.image_size = config.image_size
94
+ self.patch_size = config.patch_size
95
+
96
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
97
+
98
+ self.patch_embedding = nn.Conv2d(
99
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
100
+ )
101
+
102
+ self.num_patches = (self.image_size // self.patch_size) ** 2
103
+ self.num_positions = self.num_patches + 1
104
+
105
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
106
+
107
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
108
+ batch_size = pixel_values.shape[0]
109
+ target_dtype = self.patch_embedding.weight.dtype
110
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
111
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
112
+
113
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
114
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
115
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
116
+ return embeddings
117
+
118
+
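To make the shapes in `Blip2VisionEmbeddings.forward` concrete: with the default vision config (`image_size=224`, `patch_size=14`, `hidden_size=1408`), the image is split into `(224 // 14) ** 2 = 256` patches, and prepending the class token gives 257 positions. A small sketch, assuming only `torch` and `transformers` are installed:

```python
# Shape sketch for Blip2VisionEmbeddings with the default vision config.
import torch
from transformers import Blip2VisionConfig
from transformers.models.blip_2.modeling_blip_2 import Blip2VisionEmbeddings

config = Blip2VisionConfig()
embeddings = Blip2VisionEmbeddings(config)

pixel_values = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
out = embeddings(pixel_values)
print(out.shape)  # torch.Size([2, 257, 1408]) -> 256 patches + 1 class token
```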
119
+ class Blip2Attention(nn.Module):
120
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
121
+
122
+ def __init__(self, config):
123
+ super().__init__()
124
+ self.config = config
125
+ self.embed_dim = config.hidden_size
126
+ self.num_heads = config.num_attention_heads
127
+ self.head_dim = self.embed_dim // self.num_heads
128
+ if self.head_dim * self.num_heads != self.embed_dim:
129
+ raise ValueError(
130
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
131
+ f" {self.num_heads})."
132
+ )
133
+ self.scale = self.head_dim**-0.5
134
+ self.dropout = nn.Dropout(config.attention_dropout)
135
+
136
+ # small tweak compared to CLIP: no bias on the fused qkv projection itself; the q/v biases are attached below
137
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
138
+
139
+ if config.qkv_bias:
140
+ q_bias = nn.Parameter(torch.zeros(self.embed_dim))
141
+ v_bias = nn.Parameter(torch.zeros(self.embed_dim))
142
+ else:
143
+ q_bias = None
144
+ v_bias = None
145
+
146
+ if q_bias is not None:
147
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
148
+ self.qkv.bias = nn.Parameter(qkv_bias)
149
+
150
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
151
+
152
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
153
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
154
+
155
+ def forward(
156
+ self,
157
+ hidden_states: torch.Tensor,
158
+ head_mask: Optional[torch.Tensor] = None,
159
+ output_attentions: Optional[bool] = False,
160
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
161
+ """Input shape: Batch x Time x Channel"""
162
+
163
+ bsz, tgt_len, embed_dim = hidden_states.size()
164
+
165
+ mixed_qkv = self.qkv(hidden_states)
166
+
167
+ mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
168
+ 2, 0, 3, 1, 4
169
+ )
170
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
171
+
172
+ # Take the dot product between "query" and "key" to get the raw attention scores.
173
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
174
+
175
+ attention_scores = attention_scores * self.scale
176
+
177
+ # Normalize the attention scores to probabilities.
178
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
179
+
180
+ # This is actually dropping out entire tokens to attend to, which might
181
+ # seem a bit unusual, but is taken from the original Transformer paper.
182
+ attention_probs = self.dropout(attention_probs)
183
+
184
+ # Mask heads if we want to
185
+ if head_mask is not None:
186
+ attention_probs = attention_probs * head_mask
187
+
188
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
189
+
190
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
191
+ context_layer = context_layer.reshape(new_context_layer_shape)
192
+
193
+ output = self.projection(context_layer)
194
+
195
+ outputs = (output, attention_probs) if output_attentions else (output, None)
196
+
197
+ return outputs
198
+
199
+
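The bias handling in `Blip2Attention.__init__` (and in `read_in_q_v_bias` in the conversion script) packs a learnable query bias, a frozen zero key bias, and a learnable value bias into a single vector for the fused `qkv` projection. A toy sketch of that layout:

```python
# Toy sketch of the packed qkv bias layout used above.
import torch

embed_dim = 8  # toy size for illustration
q_bias = torch.randn(embed_dim)
v_bias = torch.randn(embed_dim)

# the key bias stays zero and is not trained; q and v biases are learnable
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
print(qkv_bias.shape)  # torch.Size([24]) == 3 * embed_dim
```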
200
+ # Copied from transformers.models.blip.modeling_blip.BlipMLP
201
+ class Blip2MLP(nn.Module):
202
+ def __init__(self, config):
203
+ super().__init__()
204
+ self.config = config
205
+ self.activation_fn = ACT2FN[config.hidden_act]
206
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
207
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
208
+
209
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
210
+ hidden_states = self.fc1(hidden_states)
211
+ hidden_states = self.activation_fn(hidden_states)
212
+ hidden_states = self.fc2(hidden_states)
213
+ return hidden_states
214
+
215
+
216
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2
217
+ class Blip2EncoderLayer(nn.Module):
218
+ def __init__(self, config: Blip2Config):
219
+ super().__init__()
220
+ self.embed_dim = config.hidden_size
221
+ self.self_attn = Blip2Attention(config)
222
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
223
+ self.mlp = Blip2MLP(config)
224
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
225
+
226
+ def forward(
227
+ self,
228
+ hidden_states: torch.Tensor,
229
+ attention_mask: torch.Tensor,
230
+ output_attentions: Optional[bool] = False,
231
+ ) -> Tuple[torch.FloatTensor]:
232
+ """
233
+ Args:
234
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
235
+ attention_mask (`torch.FloatTensor`): attention mask of size
236
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
238
+ output_attentions (`bool`, *optional*):
239
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
240
+ returned tensors for more detail.
241
+ """
242
+ residual = hidden_states
243
+
244
+ hidden_states = self.layer_norm1(hidden_states)
245
+ hidden_states, attn_weights = self.self_attn(
246
+ hidden_states=hidden_states,
247
+ head_mask=attention_mask,
248
+ output_attentions=output_attentions,
249
+ )
250
+ hidden_states = hidden_states + residual
251
+ residual = hidden_states
252
+ hidden_states = self.layer_norm2(hidden_states)
253
+ hidden_states = self.mlp(hidden_states)
254
+
255
+ hidden_states = hidden_states + residual
256
+
257
+ outputs = (hidden_states,)
258
+
259
+ if output_attentions:
260
+ outputs += (attn_weights,)
261
+
262
+ return outputs
263
+
264
+
265
+ class Blip2PreTrainedModel(PreTrainedModel):
266
+ """
267
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
268
+ models.
269
+ """
270
+
271
+ config_class = Blip2Config
272
+ base_model_prefix = "blip"
273
+ supports_gradient_checkpointing = True
274
+ _no_split_modules = ["Blip2Attention", "T5Block", "OPTDecoderLayer"]
275
+ _skip_keys_device_placement = "past_key_values"
276
+ _keep_in_fp32_modules = ["wo"]
277
+
278
+ def _init_weights(self, module):
279
+ """Initialize the weights"""
280
+ factor = self.config.initializer_range
281
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
282
+ module.weight.data.normal_(mean=0.0, std=factor)
283
+ if hasattr(module, "bias") and module.bias is not None:
284
+ module.bias.data.zero_()
285
+
286
+ if isinstance(module, Blip2VisionEmbeddings):
287
+ if hasattr(self.config, "vision_config"):
288
+ factor = self.config.vision_config.initializer_range
289
+ nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
290
+ nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
291
+
292
+ elif isinstance(module, nn.LayerNorm):
293
+ module.bias.data.zero_()
294
+ module.weight.data.fill_(1.0)
295
+ elif isinstance(module, nn.Linear) and module.bias is not None:
296
+ module.bias.data.zero_()
297
+
298
+
299
+ BLIP_2_START_DOCSTRING = r"""
300
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
301
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
302
+ etc.)
303
+
304
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
305
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
306
+ and behavior.
307
+
308
+ Parameters:
309
+ config ([`Blip2Config`]): Model configuration class with all the parameters of the model.
310
+ Initializing with a config file does not load the weights associated with the model, only the
311
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
312
+ """
313
+
314
+ BLIP_2_VISION_INPUTS_DOCSTRING = r"""
315
+ Args:
316
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
317
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
318
+ details.
319
+ output_attentions (`bool`, *optional*):
320
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
321
+ tensors for more detail.
322
+ output_hidden_states (`bool`, *optional*):
323
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
324
+ more detail.
325
+ return_dict (`bool`, *optional*):
326
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
327
+ """
328
+
329
+ BLIP_2_TEXT_INPUTS_DOCSTRING = r"""
330
+ Args:
331
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
332
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
333
+ it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
334
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
335
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
336
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
337
+ - 1 for tokens that are **not masked**,
338
+ - 0 for tokens that are **masked**.
339
+ [What are attention masks?](../glossary#attention-mask)
340
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
341
+ Indices of decoder input sequence tokens in the vocabulary.
342
+
343
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
344
+ [`PreTrainedTokenizer.__call__`] for details.
345
+
346
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
347
+
348
+ T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
349
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
350
+
351
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5
352
+ Training](./t5#training).
353
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
354
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
355
+ be used by default.
356
+ output_attentions (`bool`, *optional*):
357
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
358
+ tensors for more detail.
359
+ output_hidden_states (`bool`, *optional*):
360
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
361
+ more detail.
362
+ return_dict (`bool`, *optional*):
363
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
364
+ """
365
+
366
+ BLIP_2_INPUTS_DOCSTRING = r"""
367
+ Args:
368
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
369
+ Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
370
+ details.
371
+
372
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
373
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
374
+ provided to serve as text prompt, which the language model can continue.
375
+
376
+ Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.
377
+
378
+ [What are input IDs?](../glossary#input-ids)
379
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
380
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
381
+
382
+ - 1 for tokens that are **not masked**,
383
+ - 0 for tokens that are **masked**.
384
+
385
+ [What are attention masks?](../glossary#attention-mask)
386
+
387
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
388
+ Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
389
+ encoder-decoder language model (like T5) is used.
390
+
391
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
392
+ [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
393
+
394
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
395
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
396
+ be used by default.
397
+
398
+ Only relevant in case an encoder-decoder language model (like T5) is used.
399
+
400
+ output_attentions (`bool`, *optional*):
401
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
402
+ tensors for more detail.
403
+ output_hidden_states (`bool`, *optional*):
404
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
405
+ more detail.
406
+ return_dict (`bool`, *optional*):
407
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
408
+ """
409
+
410
+
411
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->Blip2
412
+ class Blip2Encoder(nn.Module):
413
+ """
414
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
415
+ [`Blip2EncoderLayer`].
416
+
417
+ Args:
418
+ config (`Blip2Config`):
419
+ The corresponding vision configuration for the `Blip2Encoder`.
420
+ """
421
+
422
+ def __init__(self, config: Blip2Config):
423
+ super().__init__()
424
+ self.config = config
425
+ self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
426
+ self.gradient_checkpointing = False
427
+
428
+ def forward(
429
+ self,
430
+ inputs_embeds,
431
+ attention_mask: Optional[torch.Tensor] = None,
432
+ output_attentions: Optional[bool] = None,
433
+ output_hidden_states: Optional[bool] = None,
434
+ return_dict: Optional[bool] = None,
435
+ ) -> Union[Tuple, BaseModelOutput]:
436
+ r"""
437
+ Args:
438
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
439
+ Embedded representation of the inputs. Should be float, not int tokens.
440
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
441
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
442
+
443
+ - 1 for tokens that are **not masked**,
444
+ - 0 for tokens that are **masked**.
445
+
446
+ [What are attention masks?](../glossary#attention-mask)
447
+ output_attentions (`bool`, *optional*):
448
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
449
+ returned tensors for more detail.
450
+ output_hidden_states (`bool`, *optional*):
451
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
452
+ for more detail.
453
+ return_dict (`bool`, *optional*):
454
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
455
+ """
456
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
457
+ output_hidden_states = (
458
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
459
+ )
460
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
461
+
462
+ encoder_states = () if output_hidden_states else None
463
+ all_attentions = () if output_attentions else None
464
+
465
+ hidden_states = inputs_embeds
466
+ for idx, encoder_layer in enumerate(self.layers):
467
+ if output_hidden_states:
468
+ encoder_states = encoder_states + (hidden_states,)
469
+ if self.gradient_checkpointing and self.training:
470
+ layer_outputs = self._gradient_checkpointing_func(
471
+ encoder_layer.__call__,
472
+ hidden_states,
473
+ attention_mask,
474
+ output_attentions,
475
+ )
476
+ else:
477
+ layer_outputs = encoder_layer(
478
+ hidden_states,
479
+ attention_mask,
480
+ output_attentions=output_attentions,
481
+ )
482
+
483
+ hidden_states = layer_outputs[0]
484
+
485
+ if output_attentions:
486
+ all_attentions = all_attentions + (layer_outputs[1],)
487
+
488
+ if output_hidden_states:
489
+ encoder_states = encoder_states + (hidden_states,)
490
+
491
+ if not return_dict:
492
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
493
+ return BaseModelOutput(
494
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
495
+ )
496
+
497
+
498
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2
499
+ class Blip2VisionModel(Blip2PreTrainedModel):
500
+ main_input_name = "pixel_values"
501
+ config_class = Blip2VisionConfig
502
+
503
+ def __init__(self, config: Blip2VisionConfig):
504
+ super().__init__(config)
505
+ self.config = config
506
+ embed_dim = config.hidden_size
507
+
508
+ self.embeddings = Blip2VisionEmbeddings(config)
509
+ self.encoder = Blip2Encoder(config)
510
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
511
+
512
+ self.post_init()
513
+
514
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
515
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig)
516
+ def forward(
517
+ self,
518
+ pixel_values: Optional[torch.FloatTensor] = None,
519
+ output_attentions: Optional[bool] = None,
520
+ output_hidden_states: Optional[bool] = None,
521
+ return_dict: Optional[bool] = None,
522
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
523
+ r"""
524
+ Returns:
525
+
526
+ """
527
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
528
+ output_hidden_states = (
529
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
530
+ )
531
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
532
+
533
+ if pixel_values is None:
534
+ raise ValueError("You have to specify pixel_values")
535
+
536
+ hidden_states = self.embeddings(pixel_values)
537
+
538
+ encoder_outputs = self.encoder(
539
+ inputs_embeds=hidden_states,
540
+ output_attentions=output_attentions,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ )
544
+
545
+ last_hidden_state = encoder_outputs[0]
546
+ last_hidden_state = self.post_layernorm(last_hidden_state)
547
+
548
+ pooled_output = last_hidden_state[:, 0, :]
549
+ pooled_output = self.post_layernorm(pooled_output)
550
+
551
+ if not return_dict:
552
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
553
+
554
+ return BaseModelOutputWithPooling(
555
+ last_hidden_state=last_hidden_state,
556
+ pooler_output=pooled_output,
557
+ hidden_states=encoder_outputs.hidden_states,
558
+ attentions=encoder_outputs.attentions,
559
+ )
560
+
561
+ def get_input_embeddings(self):
562
+ return self.embeddings
563
+
564
+
565
+ class Blip2QFormerMultiHeadAttention(nn.Module):
566
+ def __init__(self, config, is_cross_attention=False):
567
+ super().__init__()
568
+ self.config = config
569
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
570
+ raise ValueError(
571
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
572
+ % (config.hidden_size, config.num_attention_heads)
573
+ )
574
+
575
+ self.num_attention_heads = config.num_attention_heads
576
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
577
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
578
+
579
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
580
+ if is_cross_attention:
581
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
582
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
583
+ else:
584
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
585
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
586
+
587
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
588
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
589
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
590
+ self.max_position_embeddings = config.max_position_embeddings
591
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
592
+ self.save_attention = False
593
+
594
+ def save_attn_gradients(self, attn_gradients):
595
+ self.attn_gradients = attn_gradients
596
+
597
+ def get_attn_gradients(self):
598
+ return self.attn_gradients
599
+
600
+ def save_attention_map(self, attention_map):
601
+ self.attention_map = attention_map
602
+
603
+ def get_attention_map(self):
604
+ return self.attention_map
605
+
606
+ def transpose_for_scores(self, x):
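+ # Reshapes (batch, seq_len, all_head_size) into (batch, num_heads, seq_len, head_size) so that
+ # attention can be computed per head with batched matrix multiplications; e.g. (2, 32, 768)
+ # becomes (2, 12, 32, 64) for 12 heads of size 64 (illustrative sizes only).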
607
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
608
+ x = x.view(*new_x_shape)
609
+ return x.permute(0, 2, 1, 3)
610
+
611
+ def forward(
612
+ self,
613
+ hidden_states,
614
+ attention_mask=None,
615
+ head_mask=None,
616
+ encoder_hidden_states=None,
617
+ encoder_attention_mask=None,
618
+ past_key_value=None,
619
+ output_attentions=False,
620
+ ):
621
+ # If this is instantiated as a cross-attention module, the keys
622
+ # and values come from an encoder; the attention mask needs to be
623
+ # such that the encoder's padding tokens are not attended to.
624
+ is_cross_attention = encoder_hidden_states is not None
625
+
626
+ if is_cross_attention:
627
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
628
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
629
+ attention_mask = encoder_attention_mask
630
+ elif past_key_value is not None:
631
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
632
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
633
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
634
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
635
+ else:
636
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
637
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
638
+
639
+ mixed_query_layer = self.query(hidden_states)
640
+
641
+ query_layer = self.transpose_for_scores(mixed_query_layer)
642
+
643
+ past_key_value = (key_layer, value_layer)
644
+
645
+ # Take the dot product between "query" and "key" to get the raw attention scores.
646
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
647
+
648
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
649
+ seq_length = hidden_states.size()[1]
650
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
651
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
652
+ distance = position_ids_l - position_ids_r
653
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
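+ # For a sequence of length L, `distance` ranges over [-(L - 1), L - 1]; adding
+ # `max_position_embeddings - 1` shifts it into [0, 2 * max_position_embeddings - 2], i.e. valid
+ # indices into the `distance_embedding` table of size 2 * max_position_embeddings - 1.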
654
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
655
+
656
+ if self.position_embedding_type == "relative_key":
657
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
658
+ attention_scores = attention_scores + relative_position_scores
659
+ elif self.position_embedding_type == "relative_key_query":
660
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
661
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
662
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
663
+
664
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
665
+
666
+ if attention_mask is not None:
667
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
668
+ attention_scores = attention_scores + attention_mask
669
+
670
+ # Normalize the attention scores to probabilities.
671
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
672
+
673
+ if is_cross_attention and self.save_attention:
674
+ self.save_attention_map(attention_probs)
675
+ attention_probs.register_hook(self.save_attn_gradients)
676
+
677
+ # This is actually dropping out entire tokens to attend to, which might
678
+ # seem a bit unusual, but is taken from the original Transformer paper.
679
+ attention_probs_dropped = self.dropout(attention_probs)
680
+
681
+ # Mask heads if we want to
682
+ if head_mask is not None:
683
+ attention_probs_dropped = attention_probs_dropped * head_mask
684
+
685
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
686
+
687
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
688
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
689
+ context_layer = context_layer.view(*new_context_layer_shape)
690
+
691
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
692
+
693
+ outputs = outputs + (past_key_value,)
694
+ return outputs
695
+
696
+
697
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer
698
+ class Blip2QFormerSelfOutput(nn.Module):
699
+ def __init__(self, config):
700
+ super().__init__()
701
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
702
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
703
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
704
+
705
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
706
+ hidden_states = self.dense(hidden_states)
707
+ hidden_states = self.dropout(hidden_states)
708
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
709
+ return hidden_states
710
+
711
+
712
+ class Blip2QFormerAttention(nn.Module):
713
+ def __init__(self, config, is_cross_attention=False):
714
+ super().__init__()
715
+ self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention)
716
+ self.output = Blip2QFormerSelfOutput(config)
717
+ self.pruned_heads = set()
718
+
719
+ def prune_heads(self, heads):
720
+ if len(heads) == 0:
721
+ return
722
+ heads, index = find_pruneable_heads_and_indices(
723
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
724
+ )
725
+
726
+ # Prune linear layers
727
+ self.attention.query = prune_linear_layer(self.attention.query, index)
728
+ self.attention.key = prune_linear_layer(self.attention.key, index)
729
+ self.attention.value = prune_linear_layer(self.attention.value, index)
730
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
731
+
732
+ # Update hyper params and store pruned heads
733
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
734
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
735
+ self.pruned_heads = self.pruned_heads.union(heads)
736
+
737
+ def forward(
738
+ self,
739
+ hidden_states: torch.Tensor,
740
+ attention_mask: Optional[torch.FloatTensor] = None,
741
+ head_mask: Optional[torch.FloatTensor] = None,
742
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
743
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
744
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
745
+ output_attentions: Optional[bool] = False,
746
+ ) -> Tuple[torch.Tensor]:
747
+ self_outputs = self.attention(
748
+ hidden_states,
749
+ attention_mask,
750
+ head_mask,
751
+ encoder_hidden_states,
752
+ encoder_attention_mask,
753
+ past_key_value,
754
+ output_attentions,
755
+ )
756
+ attention_output = self.output(self_outputs[0], hidden_states)
757
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
758
+ return outputs
759
+
760
+
761
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer
762
+ class Blip2QFormerIntermediate(nn.Module):
763
+ def __init__(self, config):
764
+ super().__init__()
765
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
766
+ if isinstance(config.hidden_act, str):
767
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
768
+ else:
769
+ self.intermediate_act_fn = config.hidden_act
770
+
771
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
772
+ hidden_states = self.dense(hidden_states)
773
+ hidden_states = self.intermediate_act_fn(hidden_states)
774
+ return hidden_states
775
+
776
+
777
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer
778
+ class Blip2QFormerOutput(nn.Module):
779
+ def __init__(self, config):
780
+ super().__init__()
781
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
782
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
783
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
784
+
785
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
786
+ hidden_states = self.dense(hidden_states)
787
+ hidden_states = self.dropout(hidden_states)
788
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
789
+ return hidden_states
790
+
791
+
792
+ class Blip2QFormerLayer(nn.Module):
793
+ def __init__(self, config, layer_idx):
794
+ super().__init__()
795
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
796
+ self.seq_len_dim = 1
797
+ self.attention = Blip2QFormerAttention(config)
798
+
799
+ self.layer_idx = layer_idx
800
+
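+ # Cross-attention to the image features is only inserted every `cross_attention_frequency` layers;
+ # e.g. with a frequency of 2 and 12 hidden layers, layers 0, 2, 4, ... cross-attend to the vision
+ # encoder output while the remaining layers only self-attend.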
801
+ if layer_idx % config.cross_attention_frequency == 0:
802
+ self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
803
+ self.has_cross_attention = True
804
+ else:
805
+ self.has_cross_attention = False
806
+
807
+ self.intermediate_query = Blip2QFormerIntermediate(config)
808
+ self.output_query = Blip2QFormerOutput(config)
809
+
810
+ def forward(
811
+ self,
812
+ hidden_states,
813
+ attention_mask=None,
814
+ head_mask=None,
815
+ encoder_hidden_states=None,
816
+ encoder_attention_mask=None,
817
+ past_key_value=None,
818
+ output_attentions=False,
819
+ query_length=0,
820
+ ):
821
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
822
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
823
+ self_attention_outputs = self.attention(
824
+ hidden_states,
825
+ attention_mask,
826
+ head_mask,
827
+ output_attentions=output_attentions,
828
+ past_key_value=self_attn_past_key_value,
829
+ )
830
+ attention_output = self_attention_outputs[0]
831
+ outputs = self_attention_outputs[1:-1]
832
+
833
+ present_key_value = self_attention_outputs[-1]
834
+
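+ # The first `query_length` positions are the learned query tokens: they (optionally) cross-attend
+ # to the image features and use the dedicated `intermediate_query`/`output_query` feed-forward,
+ # while any text tokens that follow use the regular feed-forward; the two parts are concatenated
+ # back along the sequence dimension below.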
835
+ if query_length > 0:
836
+ query_attention_output = attention_output[:, :query_length, :]
837
+
838
+ if self.has_cross_attention:
839
+ if encoder_hidden_states is None:
840
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
841
+ cross_attention_outputs = self.crossattention(
842
+ query_attention_output,
843
+ attention_mask,
844
+ head_mask,
845
+ encoder_hidden_states,
846
+ encoder_attention_mask,
847
+ output_attentions=output_attentions,
848
+ )
849
+ query_attention_output = cross_attention_outputs[0]
850
+ # add cross attentions if we output attention weights
851
+ outputs = outputs + cross_attention_outputs[1:-1]
852
+
853
+ layer_output = apply_chunking_to_forward(
854
+ self.feed_forward_chunk_query,
855
+ self.chunk_size_feed_forward,
856
+ self.seq_len_dim,
857
+ query_attention_output,
858
+ )
859
+
860
+ if attention_output.shape[1] > query_length:
861
+ layer_output_text = apply_chunking_to_forward(
862
+ self.feed_forward_chunk,
863
+ self.chunk_size_feed_forward,
864
+ self.seq_len_dim,
865
+ attention_output[:, query_length:, :],
866
+ )
867
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
868
+ else:
869
+ layer_output = apply_chunking_to_forward(
870
+ self.feed_forward_chunk,
871
+ self.chunk_size_feed_forward,
872
+ self.seq_len_dim,
873
+ attention_output,
874
+ )
875
+ outputs = (layer_output,) + outputs
876
+
877
+ outputs = outputs + (present_key_value,)
878
+
879
+ return outputs
880
+
881
+ def feed_forward_chunk(self, attention_output):
882
+ intermediate_output = self.intermediate(attention_output)
883
+ layer_output = self.output(intermediate_output, attention_output)
884
+ return layer_output
885
+
886
+ def feed_forward_chunk_query(self, attention_output):
887
+ intermediate_output = self.intermediate_query(attention_output)
888
+ layer_output = self.output_query(intermediate_output, attention_output)
889
+ return layer_output
890
+
891
+
892
+ class Blip2QFormerEncoder(nn.Module):
893
+ def __init__(self, config):
894
+ super().__init__()
895
+ self.config = config
896
+ self.layer = nn.ModuleList(
897
+ [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
898
+ )
899
+ self.gradient_checkpointing = False
900
+
901
+ def forward(
902
+ self,
903
+ hidden_states,
904
+ attention_mask=None,
905
+ head_mask=None,
906
+ encoder_hidden_states=None,
907
+ encoder_attention_mask=None,
908
+ past_key_values=None,
909
+ use_cache=None,
910
+ output_attentions=False,
911
+ output_hidden_states=False,
912
+ return_dict=True,
913
+ query_length=0,
914
+ ):
915
+ all_hidden_states = () if output_hidden_states else None
916
+ all_self_attentions = () if output_attentions else None
917
+ all_cross_attentions = () if output_attentions else None
918
+
919
+ next_decoder_cache = () if use_cache else None
920
+
921
+ for i in range(self.config.num_hidden_layers):
922
+ layer_module = self.layer[i]
923
+ if output_hidden_states:
924
+ all_hidden_states = all_hidden_states + (hidden_states,)
925
+
926
+ layer_head_mask = head_mask[i] if head_mask is not None else None
927
+ past_key_value = past_key_values[i] if past_key_values is not None else None
928
+
929
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
930
+ if use_cache:
931
+ logger.warning(
932
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
933
+ )
934
+ use_cache = False
935
+ layer_outputs = self._gradient_checkpointing_func(
936
+ layer_module.__call__,
937
+ hidden_states,
938
+ attention_mask,
939
+ layer_head_mask,
940
+ encoder_hidden_states,
941
+ encoder_attention_mask,
942
+ )
943
+ else:
944
+ layer_outputs = layer_module(
945
+ hidden_states,
946
+ attention_mask,
947
+ layer_head_mask,
948
+ encoder_hidden_states,
949
+ encoder_attention_mask,
950
+ past_key_value,
951
+ output_attentions,
952
+ query_length,
953
+ )
954
+
955
+ hidden_states = layer_outputs[0]
956
+ if use_cache:
957
+ next_decoder_cache += (layer_outputs[-1],)
958
+ if output_attentions:
959
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
960
+ if layer_module.has_cross_attention:
961
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
962
+
963
+ if output_hidden_states:
964
+ all_hidden_states = all_hidden_states + (hidden_states,)
965
+
966
+ if not return_dict:
967
+ return tuple(
968
+ v
969
+ for v in [
970
+ hidden_states,
971
+ next_decoder_cache,
972
+ all_hidden_states,
973
+ all_self_attentions,
974
+ all_cross_attentions,
975
+ ]
976
+ if v is not None
977
+ )
978
+ return BaseModelOutputWithPastAndCrossAttentions(
979
+ last_hidden_state=hidden_states,
980
+ past_key_values=next_decoder_cache,
981
+ hidden_states=all_hidden_states,
982
+ attentions=all_self_attentions,
983
+ cross_attentions=all_cross_attentions,
984
+ )
985
+
986
+
987
+ class Blip2QFormerModel(Blip2PreTrainedModel):
988
+ """
989
+ Querying Transformer (Q-Former), used in BLIP-2.
990
+ """
991
+
992
+ def __init__(self, config: Blip2QFormerConfig):
993
+ super().__init__(config)
994
+ self.config = config
995
+
996
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
997
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
998
+
999
+ self.encoder = Blip2QFormerEncoder(config)
1000
+
1001
+ self.post_init()
1002
+
1003
+ def get_input_embeddings(self):
1004
+ return self.embeddings.word_embeddings
1005
+
1006
+ def set_input_embeddings(self, value):
1007
+ self.embeddings.word_embeddings = value
1008
+
1009
+ def _prune_heads(self, heads_to_prune):
1010
+ """
1011
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1012
+ class PreTrainedModel
1013
+ """
1014
+ for layer, heads in heads_to_prune.items():
1015
+ self.encoder.layer[layer].attention.prune_heads(heads)
1016
+
1017
+ def get_extended_attention_mask(
1018
+ self,
1019
+ attention_mask: torch.Tensor,
1020
+ input_shape: Tuple[int],
1021
+ device: torch.device,
1022
+ has_query: bool = False,
1023
+ ) -> torch.Tensor:
1024
+ """
1025
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
1026
+
1027
+ Arguments:
1028
+ attention_mask (`torch.Tensor`):
1029
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
1030
+ input_shape (`Tuple[int]`):
1031
+ The shape of the input to the model.
1032
+ device (`torch.device`):
1033
+ The device of the input to the model.
1034
+
1035
+ Returns:
1036
+ `torch.Tensor`: The extended attention mask, with the same dtype as `attention_mask.dtype`.
1037
+ """
1038
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1039
+ # ourselves in which case we just need to make it broadcastable to all heads.
1040
+ if attention_mask.dim() == 3:
1041
+ extended_attention_mask = attention_mask[:, None, :, :]
1042
+ elif attention_mask.dim() == 2:
1043
+ # Provided a padding mask of dimensions [batch_size, seq_length]
1044
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
1045
+ extended_attention_mask = attention_mask[:, None, None, :]
1046
+ else:
1047
+ raise ValueError(
1048
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
1049
+ input_shape, attention_mask.shape
1050
+ )
1051
+ )
1052
+
1053
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1054
+ # masked positions, this operation will create a tensor which is 0.0 for
1055
+ # positions we want to attend and -10000.0 for masked positions.
1056
+ # Since we are adding it to the raw scores before the softmax, this is
1057
+ # effectively the same as removing these entirely.
1058
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
1059
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
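+ # e.g. a padding mask [1, 1, 0] becomes [0.0, 0.0, -10000.0] (broadcast over heads and query
+ # positions), so the masked position receives ~zero weight after the softmax inside attention.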
1060
+ return extended_attention_mask
1061
+
1062
+ def forward(
1063
+ self,
1064
+ query_embeds: torch.FloatTensor,
1065
+ attention_mask: Optional[torch.FloatTensor] = None,
1066
+ head_mask: Optional[torch.FloatTensor] = None,
1067
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1068
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1069
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1070
+ use_cache: Optional[bool] = None,
1071
+ output_attentions: Optional[bool] = None,
1072
+ output_hidden_states: Optional[bool] = None,
1073
+ return_dict: Optional[bool] = None,
1074
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1075
+ r"""
1076
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
1077
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1078
+ the model is configured as a decoder.
1079
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
1080
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1081
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1082
+ - 1 for tokens that are **not masked**,
1083
+ - 0 for tokens that are **masked**.
1084
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
1085
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
1086
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
1087
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
1088
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
1089
+ `(batch_size, sequence_length)`.
1090
+ use_cache (`bool`, `optional`):
1091
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1092
+ `past_key_values`).
1093
+ """
1094
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1095
+ output_hidden_states = (
1096
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1097
+ )
1098
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1099
+
1100
+ # past_key_values_length
1101
+ past_key_values_length = (
1102
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
1103
+ )
1104
+
1105
+ query_length = query_embeds.shape[1] if query_embeds is not None else 0
1106
+
1107
+ embedding_output = self.layernorm(query_embeds)
1108
+ embedding_output = self.dropout(embedding_output)
1109
+
1110
+ input_shape = embedding_output.size()[:-1]
1111
+ batch_size, seq_length = input_shape
1112
+ device = embedding_output.device
1113
+
1114
+ if attention_mask is None:
1115
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1116
+
1117
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1118
+ # ourselves in which case we just need to make it broadcastable to all heads.
1119
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
1120
+
1121
+ # If a 2D or 3D attention mask is provided for the cross-attention
1122
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1123
+ if encoder_hidden_states is not None:
1124
+ if isinstance(encoder_hidden_states, list):
1125
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
1126
+ else:
1127
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1128
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1129
+
1130
+ if isinstance(encoder_attention_mask, list):
1131
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
1132
+ elif encoder_attention_mask is None:
1133
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1134
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1135
+ else:
1136
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1137
+ else:
1138
+ encoder_extended_attention_mask = None
1139
+
1140
+ # Prepare head mask if needed
1141
+ # 1.0 in head_mask indicate we keep the head
1142
+ # attention_probs has shape bsz x n_heads x N x N
1143
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1144
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1145
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1146
+
1147
+ encoder_outputs = self.encoder(
1148
+ embedding_output,
1149
+ attention_mask=extended_attention_mask,
1150
+ head_mask=head_mask,
1151
+ encoder_hidden_states=encoder_hidden_states,
1152
+ encoder_attention_mask=encoder_extended_attention_mask,
1153
+ past_key_values=past_key_values,
1154
+ use_cache=use_cache,
1155
+ output_attentions=output_attentions,
1156
+ output_hidden_states=output_hidden_states,
1157
+ return_dict=return_dict,
1158
+ query_length=query_length,
1159
+ )
1160
+ sequence_output = encoder_outputs[0]
1161
+ pooled_output = sequence_output[:, 0, :]
1162
+
1163
+ if not return_dict:
1164
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1165
+
1166
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1167
+ last_hidden_state=sequence_output,
1168
+ pooler_output=pooled_output,
1169
+ past_key_values=encoder_outputs.past_key_values,
1170
+ hidden_states=encoder_outputs.hidden_states,
1171
+ attentions=encoder_outputs.attentions,
1172
+ cross_attentions=encoder_outputs.cross_attentions,
1173
+ )
1174
+
1175
+
1176
+ @add_start_docstrings(
1177
+ """
1178
+ BLIP-2 Model for generating text and image features. The model consists of a vision encoder, Querying Transformer
1179
+ (Q-Former) and a language model.
1180
+ """,
1181
+ BLIP_2_START_DOCSTRING,
1182
+ )
1183
+ class Blip2Model(Blip2PreTrainedModel):
1184
+ config_class = Blip2Config
1185
+ main_input_name = "pixel_values"
1186
+
1187
+ def __init__(self, config: Blip2Config):
1188
+ super().__init__(config)
1189
+
1190
+ self.vision_model = Blip2VisionModel(config.vision_config)
1191
+
1192
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
1193
+ self.qformer = Blip2QFormerModel(config.qformer_config)
1194
+
1195
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
1196
+ if config.use_decoder_only_language_model:
1197
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
1198
+ else:
1199
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
1200
+
1201
+ # Update _tied_weights_keys using the base model used.
1202
+ if language_model._tied_weights_keys is not None:
1203
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
1204
+
1205
+ self.language_model = language_model
1206
+
1207
+ # Initialize weights and apply final processing
1208
+ self.post_init()
1209
+
1210
+ def get_input_embeddings(self):
1211
+ return self.language_model.get_input_embeddings()
1212
+
1213
+ def set_input_embeddings(self, value):
1214
+ self.language_model.set_input_embeddings(value)
1215
+
1216
+ def set_output_embeddings(self, new_embeddings):
1217
+ self.language_model.set_output_embeddings(new_embeddings)
1218
+
1219
+ def get_output_embeddings(self) -> nn.Module:
1220
+ return self.language_model.get_output_embeddings()
1221
+
1222
+ def get_encoder(self):
1223
+ return self.language_model.get_encoder()
1224
+
1225
+ def get_decoder(self):
1226
+ return self.language_model.get_decoder()
1227
+
1228
+ def _tie_weights(self):
1229
+ if not self.config.use_decoder_only_language_model:
1230
+ self.language_model.encoder.embed_tokens = self.language_model.shared
1231
+ self.language_model.decoder.embed_tokens = self.language_model.shared
1232
+
1233
+ @add_start_docstrings_to_model_forward(BLIP_2_TEXT_INPUTS_DOCSTRING)
1234
+ def get_text_features(
1235
+ self,
1236
+ input_ids: Optional[torch.Tensor] = None,
1237
+ attention_mask: Optional[torch.Tensor] = None,
1238
+ decoder_input_ids: Optional[torch.Tensor] = None,
1239
+ decoder_attention_mask: Optional[torch.Tensor] = None,
1240
+ labels: Optional[torch.Tensor] = None,
1241
+ output_attentions: Optional[bool] = None,
1242
+ output_hidden_states: Optional[bool] = None,
1243
+ return_dict: Optional[bool] = None,
1244
+ ):
1245
+ r"""
1246
+ Returns:
1247
+ text_outputs (`CausalLMOutputWithPast`, or `tuple(torch.FloatTensor)` if `return_dict=False`):
1248
+ The language model outputs. If `return_dict=True`, the output is a [`CausalLMOutputWithPast`] that
1249
+ contains the language model logits, the past key values and the hidden states if
1250
+ `output_hidden_states=True`.
1251
+ Examples:
1252
+ ```python
1253
+ >>> import torch
1254
+ >>> from transformers import AutoTokenizer, Blip2Model
1255
+
1256
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1257
+
1258
+ >>> tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-opt-2.7b")
1259
+ >>> inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
1260
+ >>> text_features = model.get_text_features(**inputs)
1261
+ ```"""
1262
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1263
+ output_hidden_states = (
1264
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1265
+ )
1266
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1267
+
1268
+ if self.config.use_decoder_only_language_model:
1269
+ text_outputs = self.language_model(
1270
+ input_ids=input_ids,
1271
+ attention_mask=attention_mask,
1272
+ output_attentions=output_attentions,
1273
+ output_hidden_states=output_hidden_states,
1274
+ return_dict=return_dict,
1275
+ )
1276
+ else:
1277
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1278
+
1279
+ text_outputs = self.language_model(
1280
+ inputs_embeds=inputs_embeds,
1281
+ attention_mask=attention_mask,
1282
+ decoder_input_ids=decoder_input_ids,
1283
+ decoder_attention_mask=decoder_attention_mask,
1284
+ output_attentions=output_attentions,
1285
+ output_hidden_states=output_hidden_states,
1286
+ return_dict=return_dict,
1287
+ labels=labels,
1288
+ )
1289
+
1290
+ return text_outputs
1291
+
1292
+ @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
1293
+ def get_image_features(
1294
+ self,
1295
+ pixel_values: Optional[torch.FloatTensor] = None,
1296
+ output_attentions: Optional[bool] = None,
1297
+ output_hidden_states: Optional[bool] = None,
1298
+ return_dict: Optional[bool] = None,
1299
+ ):
1300
+ r"""
1301
+ Returns:
1302
+ vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):
1303
+ The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that
1304
+ contains the image features, the pooled image features and the hidden states if
1305
+ `output_hidden_states=True`.
1306
+ Examples:
1307
+ ```python
1308
+ >>> import torch
1309
+ >>> from PIL import Image
1310
+ >>> import requests
1311
+ >>> from transformers import AutoProcessor, Blip2Model
1312
+
1313
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1314
+
1315
+ >>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
1316
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1317
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1318
+ >>> inputs = processor(images=image, return_tensors="pt")
1319
+ >>> image_outputs = model.get_image_features(**inputs)
1320
+ ```"""
1321
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1322
+ output_hidden_states = (
1323
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1324
+ )
1325
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1326
+
1327
+ vision_outputs = self.vision_model(
1328
+ pixel_values=pixel_values,
1329
+ output_attentions=output_attentions,
1330
+ output_hidden_states=output_hidden_states,
1331
+ return_dict=return_dict,
1332
+ )
1333
+
1334
+ return vision_outputs
1335
+
1336
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1337
+ def get_qformer_features(
1338
+ self,
1339
+ pixel_values: Optional[torch.FloatTensor] = None,
1340
+ output_attentions: Optional[bool] = None,
1341
+ output_hidden_states: Optional[bool] = None,
1342
+ return_dict: Optional[bool] = None,
1343
+ ):
1344
+ r"""
1345
+ Returns:
1346
+ vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`):
1347
+ The vision model outputs. If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that
1348
+ contains the image features, the pooled image features and the hidden states if
1349
+ `output_hidden_states=True`.
1350
+ Examples:
1351
+ ```python
1352
+ >>> import torch
1353
+ >>> from PIL import Image
1354
+ >>> import requests
1355
+ >>> from transformers import Blip2Processor, Blip2Model
1356
+
1357
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1358
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b")
1359
+
1360
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1361
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1362
+ >>> inputs = processor(images=image, return_tensors="pt")
1363
+ >>> qformer_outputs = model.get_qformer_features(**inputs)
1364
+ ```"""
1365
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1366
+ output_hidden_states = (
1367
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1368
+ )
1369
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1370
+
1371
+ vision_outputs = self.vision_model(
1372
+ pixel_values=pixel_values,
1373
+ output_attentions=output_attentions,
1374
+ output_hidden_states=output_hidden_states,
1375
+ return_dict=return_dict,
1376
+ )
1377
+
1378
+ image_embeds = vision_outputs[0]
1379
+
1380
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1381
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1382
+
1383
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1384
+ query_outputs = self.qformer(
1385
+ query_embeds=query_tokens,
1386
+ encoder_hidden_states=image_embeds,
1387
+ encoder_attention_mask=image_attention_mask,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+
1393
+ return query_outputs
1394
+
1395
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1396
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
1397
+ def forward(
1398
+ self,
1399
+ pixel_values: torch.FloatTensor,
1400
+ input_ids: torch.FloatTensor,
1401
+ attention_mask: Optional[torch.LongTensor] = None,
1402
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1403
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1404
+ output_attentions: Optional[bool] = None,
1405
+ output_hidden_states: Optional[bool] = None,
1406
+ labels: Optional[torch.LongTensor] = None,
1407
+ return_dict: Optional[bool] = None,
1408
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
1409
+ r"""
1410
+ Returns:
1411
+
1412
+ Examples:
1413
+
1414
+ ```python
1415
+ >>> from PIL import Image
1416
+ >>> import requests
1417
+ >>> from transformers import Blip2Processor, Blip2Model
1418
+ >>> import torch
1419
+
1420
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
1421
+
1422
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1423
+ >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
1424
+ >>> model.to(device) # doctest: +IGNORE_RESULT
1425
+
1426
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1427
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1428
+
1429
+ >>> prompt = "Question: how many cats are there? Answer:"
1430
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16)
1431
+
1432
+ >>> outputs = model(**inputs)
1433
+ ```"""
1434
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1435
+
1436
+ # step 1: forward the images through the vision encoder,
1437
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
1438
+ vision_outputs = self.vision_model(
1439
+ pixel_values=pixel_values,
1440
+ output_attentions=output_attentions,
1441
+ output_hidden_states=output_hidden_states,
1442
+ return_dict=return_dict,
1443
+ )
1444
+ image_embeds = vision_outputs[0]
1445
+
1446
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1447
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1448
+
1449
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1450
+ query_outputs = self.qformer(
1451
+ query_embeds=query_tokens,
1452
+ encoder_hidden_states=image_embeds,
1453
+ encoder_attention_mask=image_attention_mask,
1454
+ output_attentions=output_attentions,
1455
+ output_hidden_states=output_hidden_states,
1456
+ return_dict=return_dict,
1457
+ )
1458
+ query_output = query_outputs[0]
1459
+
1460
+ # step 3: use the language model, conditioned on the query outputs and the prompt
1461
+ language_model_inputs = self.language_projection(query_output)
1462
+ language_model_attention_mask = torch.ones(
1463
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1464
+ )
1465
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1466
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds], dim=1)
1467
+
1468
+ if attention_mask is None:
1469
+ attention_mask = torch.ones_like(input_ids)
1470
+ expected_device = language_model_attention_mask.device
1471
+ attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)
1472
+
1473
+ if self.config.use_decoder_only_language_model:
1474
+ outputs = self.language_model(
1475
+ inputs_embeds=inputs_embeds,
1476
+ attention_mask=attention_mask,
1477
+ output_attentions=output_attentions,
1478
+ output_hidden_states=output_hidden_states,
1479
+ return_dict=return_dict,
1480
+ )
1481
+ logits = outputs.logits if return_dict else outputs[0]
1482
+ loss = None
1483
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
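+ # (the language model receives `num_query_tokens` projected query embeddings prepended to the
+ # prompt embeddings, so only the last `labels.size(1)` logit positions line up with the labels;
+ # the slice below drops the query positions before the usual shift-by-one causal LM loss)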
1484
+ if labels is not None:
1485
+ labels = labels.to(logits.device)
1486
+ logits = logits[:, -labels.size(1) :, :]
1487
+ # Shift so that tokens < n predict n
1488
+ shift_logits = logits[..., :-1, :].contiguous()
1489
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
1490
+
1491
+ # Flatten the tokens
1492
+ loss_fct = CrossEntropyLoss(reduction="mean")
1493
+
1494
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
1495
+ else:
1496
+ outputs = self.language_model(
1497
+ inputs_embeds=inputs_embeds,
1498
+ attention_mask=attention_mask,
1499
+ decoder_input_ids=decoder_input_ids,
1500
+ decoder_attention_mask=decoder_attention_mask,
1501
+ output_attentions=output_attentions,
1502
+ output_hidden_states=output_hidden_states,
1503
+ return_dict=return_dict,
1504
+ labels=labels,
1505
+ )
1506
+ loss = outputs.loss if return_dict else outputs[0]
1507
+ logits = outputs.logits if return_dict else outputs[1]
1508
+
1509
+ if not return_dict:
1510
+ output = (logits, vision_outputs, query_outputs, outputs)
1511
+ return ((loss,) + output) if loss is not None else output
1512
+
1513
+ return Blip2ForConditionalGenerationModelOutput(
1514
+ loss=loss,
1515
+ logits=logits,
1516
+ vision_outputs=vision_outputs,
1517
+ qformer_outputs=query_outputs,
1518
+ language_model_outputs=outputs,
1519
+ )
1520
+
1521
+
1522
+ @add_start_docstrings(
1523
+ """
1524
+ BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision
1525
+ encoder, Querying Transformer (Q-Former) and a language model.
1526
+
1527
+ One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
1528
+ the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
1529
+
1530
+ <Tip>
1531
+
1532
+ Note that Flan-T5 checkpoints cannot be cast to float16. They are pre-trained using bfloat16.
1533
+
1534
+ </Tip>
1535
+ """,
1536
+ BLIP_2_START_DOCSTRING,
1537
+ )
1538
+ class Blip2ForConditionalGeneration(Blip2PreTrainedModel):
1539
+ config_class = Blip2Config
1540
+ main_input_name = "pixel_values"
1541
+
1542
+ def __init__(self, config: Blip2Config):
1543
+ super().__init__(config)
1544
+
1545
+ self.vision_model = Blip2VisionModel(config.vision_config)
1546
+
1547
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
1548
+ self.qformer = Blip2QFormerModel(config.qformer_config)
1549
+
1550
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
1551
+ if config.use_decoder_only_language_model:
1552
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
1553
+ else:
1554
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
1555
+
1556
+ # Update _tied_weights_keys using the base model used.
1557
+ if language_model._tied_weights_keys is not None:
1558
+ self._tied_weights_keys = [f"language_model.{k}" for k in language_model._tied_weights_keys]
1559
+
1560
+ self.language_model = language_model
1561
+
1562
+ # Initialize weights and apply final processing
1563
+ self.post_init()
1564
+
1565
+ def get_input_embeddings(self):
1566
+ return self.language_model.get_input_embeddings()
1567
+
1568
+ def set_input_embeddings(self, value):
1569
+ self.language_model.set_input_embeddings(value)
1570
+
1571
+ def set_output_embeddings(self, new_embeddings):
1572
+ self.language_model.set_output_embeddings(new_embeddings)
1573
+
1574
+ def get_output_embeddings(self) -> nn.Module:
1575
+ return self.language_model.get_output_embeddings()
1576
+
1577
+ def get_encoder(self):
1578
+ return self.language_model.get_encoder()
1579
+
1580
+ def get_decoder(self):
1581
+ return self.language_model.get_decoder()
1582
+
1583
+ def _tie_weights(self):
1584
+ if not self.config.use_decoder_only_language_model:
1585
+ self.language_model.encoder.embed_tokens = self.language_model.shared
1586
+ self.language_model.decoder.embed_tokens = self.language_model.shared
1587
+
1588
+ def _preprocess_accelerate(self):
1589
+ r"""
1590
+ Some pre-processing hacks to make the model `accelerate` compatible. Check
1591
+ https://github.com/huggingface/transformers/pull/21707 for more details.
1592
+ """
1593
+ hf_device_map = self.hf_device_map
1594
+
1595
+ if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
1596
+ # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`.
1597
+ logger.warning(
1598
+ "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
1599
+ " in a multi-GPU environment. This may lead to unexpected behavior when using `accelerate`."
1600
+ " Please pass a `device_map` that contains `language_model` to remove this warning."
1601
+ " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
1602
+ " more details on creating a `device_map` for large models.",
1603
+ )
1604
+
1605
+ if hasattr(self.language_model, "_hf_hook"):
1606
+ self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
1607
+
1608
+ @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING)
1609
+ @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig)
1610
+ def forward(
1611
+ self,
1612
+ pixel_values: torch.FloatTensor,
1613
+ input_ids: torch.FloatTensor,
1614
+ attention_mask: Optional[torch.LongTensor] = None,
1615
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1616
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ labels: Optional[torch.LongTensor] = None,
1620
+ return_dict: Optional[bool] = None,
1621
+ ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]:
1622
+ r"""
1623
+ Returns:
1624
+
1625
+ Examples:
1626
+
1627
+ Prepare processor, model and image input
1628
+
1629
+ ```python
1630
+ >>> from PIL import Image
1631
+ >>> import requests
1632
+ >>> from transformers import Blip2Processor, Blip2ForConditionalGeneration
1633
+ >>> import torch
1634
+
1635
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
1636
+
1637
+ >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
1638
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
1639
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16
1640
+ ... ) # doctest: +IGNORE_RESULT
1641
+
1642
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1643
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1644
+ ```
1645
+
1646
+ Image captioning (without providing a text prompt):
1647
+
1648
+ ```python
1649
+ >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
1650
+
1651
+ >>> generated_ids = model.generate(**inputs)
1652
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1653
+ >>> print(generated_text)
1654
+ two cats laying on a couch
1655
+ ```
1656
+
1657
+ Visual question answering (prompt = question):
1658
+
1659
+ ```python
1660
+ >>> prompt = "Question: how many cats are there? Answer:"
1661
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.float16)
1662
+
1663
+ >>> generated_ids = model.generate(**inputs)
1664
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1665
+ >>> print(generated_text)
1666
+ two
1667
+ ```
1668
+
1669
+ Note that int8 inference is also supported through [bitsandbytes](https://github.com/TimDettmers/bitsandbytes).
1670
+ This greatly reduces the amount of memory used by the model while maintaining the same performance.
1671
+
1672
+ ```python
1673
+ >>> model = Blip2ForConditionalGeneration.from_pretrained(
1674
+ ... "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.bfloat16
1675
+ ... ) # doctest: +IGNORE_RESULT
1676
+
1677
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
1678
+
1679
+ >>> generated_ids = model.generate(**inputs)
1680
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
1681
+ >>> print(generated_text)
1682
+ two
1683
+ ```"""
1684
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1685
+
1686
+ # step 1: forward the images through the vision encoder,
1687
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
1688
+ vision_outputs = self.vision_model(
1689
+ pixel_values=pixel_values,
1690
+ output_attentions=output_attentions,
1691
+ output_hidden_states=output_hidden_states,
1692
+ return_dict=return_dict,
1693
+ )
1694
+ image_embeds = vision_outputs[0]
1695
+
1696
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1697
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1698
+
1699
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1700
+ query_outputs = self.qformer(
1701
+ query_embeds=query_tokens,
1702
+ encoder_hidden_states=image_embeds,
1703
+ encoder_attention_mask=image_attention_mask,
1704
+ output_attentions=output_attentions,
1705
+ output_hidden_states=output_hidden_states,
1706
+ return_dict=return_dict,
1707
+ )
1708
+ query_output = query_outputs[0]
1709
+
1710
+ # step 3: use the language model, conditioned on the query outputs and the prompt
1711
+ language_model_inputs = self.language_projection(query_output)
1712
+ language_model_attention_mask = torch.ones(
1713
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1714
+ )
1715
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1716
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1717
+
1718
+ if attention_mask is None:
1719
+ attention_mask = torch.ones_like(input_ids)
1720
+ expected_device = language_model_attention_mask.device
1721
+ attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1)
1722
+
1723
+ if self.config.use_decoder_only_language_model:
1724
+ outputs = self.language_model(
1725
+ inputs_embeds=inputs_embeds,
1726
+ attention_mask=attention_mask,
1727
+ output_attentions=output_attentions,
1728
+ output_hidden_states=output_hidden_states,
1729
+ return_dict=return_dict,
1730
+ )
1731
+ logits = outputs.logits if return_dict else outputs[0]
1732
+ loss = None
1733
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
1734
+ if labels is not None:
1735
+ labels = labels.to(logits.device)
1736
+ logits = logits[:, -labels.size(1) :, :]
1737
+ # Shift so that tokens < n predict n
1738
+ shift_logits = logits[..., :-1, :].contiguous()
1739
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
1740
+
1741
+ # Flatten the tokens
1742
+ loss_fct = CrossEntropyLoss(reduction="mean")
1743
+
1744
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
1745
+ else:
1746
+ outputs = self.language_model(
1747
+ inputs_embeds=inputs_embeds,
1748
+ attention_mask=attention_mask,
1749
+ decoder_input_ids=decoder_input_ids,
1750
+ decoder_attention_mask=decoder_attention_mask,
1751
+ output_attentions=output_attentions,
1752
+ output_hidden_states=output_hidden_states,
1753
+ return_dict=return_dict,
1754
+ labels=labels,
1755
+ )
1756
+ loss = outputs.loss if return_dict else outputs[0]
1757
+ logits = outputs.logits if return_dict else outputs[1]
1758
+
1759
+ if not return_dict:
1760
+ output = (logits, vision_outputs, query_outputs, outputs)
1761
+ return ((loss,) + output) if loss is not None else output
1762
+
1763
+ return Blip2ForConditionalGenerationModelOutput(
1764
+ loss=loss,
1765
+ logits=logits,
1766
+ vision_outputs=vision_outputs,
1767
+ qformer_outputs=query_outputs,
1768
+ language_model_outputs=outputs,
1769
+ )
1770
+
1771
+ @torch.no_grad()
1772
+ def generate(
1773
+ self,
1774
+ pixel_values: torch.FloatTensor,
1775
+ input_ids: Optional[torch.LongTensor] = None,
1776
+ attention_mask: Optional[torch.LongTensor] = None,
1777
+ **generate_kwargs,
1778
+ ) -> torch.LongTensor:
1779
+ """
1780
+ Overrides `generate` function to be able to use the model as a conditional generator.
1781
+
1782
+ Args:
1783
+ pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
1784
+ Input images to be processed.
1785
+ input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1786
+ The sequence used as a prompt for the generation.
1787
+ attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1788
+ Mask to avoid performing attention on padding token indices
1789
+
1790
+ Returns:
1791
+ captions (list): A list of strings of length batch_size * num_captions.
1792
+ """
1793
+ if hasattr(self, "hf_device_map"):
1794
+ # preprocess for `accelerate`
1795
+ self._preprocess_accelerate()
1796
+
1797
+ batch_size = pixel_values.shape[0]
1798
+ image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
1799
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1800
+
1801
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1802
+ query_outputs = self.qformer(
1803
+ query_embeds=query_tokens,
1804
+ encoder_hidden_states=image_embeds,
1805
+ encoder_attention_mask=image_attention_mask,
1806
+ return_dict=True,
1807
+ )
1808
+ query_output = query_outputs.last_hidden_state
1809
+
1810
+ language_model_inputs = self.language_projection(query_output)
1811
+ language_attention_mask = torch.ones(
1812
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1813
+ )
1814
+ if input_ids is None:
1815
+ input_ids = (
1816
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
1817
+ .repeat(batch_size, 1)
1818
+ .to(image_embeds.device)
1819
+ )
1820
+ if attention_mask is None:
1821
+ attention_mask = torch.ones_like(input_ids)
1822
+ attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)
1823
+
1824
+ # concatenate query embeddings with prompt embeddings
1825
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1826
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1827
+
1828
+ # add the image embeds length to max_length, so that the final max_length is counted only over the token embeds
1829
+ # -1 accounts for the BOS token that is prepended to the output after `generate`.
1830
+ # TODO (joao, raushan): refactor `generate` to avoid these operations with VLMs
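+ # e.g. with 32 query tokens and the default max_length of 20, a decoder-only language model gets
+ # 32 + 20 - 1 = 51 positions, so the 20-token budget applies to the generated text rather than
+ # being consumed by the prepended query embeddings.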
1831
+ if not self.language_model.config.is_encoder_decoder:
1832
+ generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
1833
+ generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
1834
+
1835
+ outputs = self.language_model.generate(
1836
+ inputs_embeds=inputs_embeds,
1837
+ attention_mask=attention_mask,
1838
+ **generate_kwargs,
1839
+ )
1840
+
1841
+ # This is a temporary workaround to stay consistent with other generation models and
1842
+ # have BOS as the first token, even though under the hood we are calling the LM with embeds
1843
+ if not self.language_model.config.is_encoder_decoder:
1844
+ bos_tokens = (
1845
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
1846
+ .repeat(batch_size, 1)
1847
+ .to(image_embeds.device)
1848
+ )
1849
+ if not isinstance(outputs, torch.Tensor):
1850
+ outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1)
1851
+ else:
1852
+ outputs = torch.cat([bos_tokens, outputs], dim=-1)
1853
+ return outputs
llmeval-env/lib/python3.10/site-packages/transformers/models/blip_2/processing_blip_2.py ADDED
@@ -0,0 +1,155 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for BLIP-2.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...image_utils import ImageInput
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class Blip2Processor(ProcessorMixin):
28
+ r"""
29
+ Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.
30
+
31
+ [`Blip2Processor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring
32
+ of [`~Blip2Processor.__call__`] and [`~Blip2Processor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`BlipImageProcessor`):
36
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
37
+ tokenizer (`AutoTokenizer`):
38
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "BlipImageProcessor"
43
+ tokenizer_class = "AutoTokenizer"
44
+
45
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.__init__
46
+ def __init__(self, image_processor, tokenizer):
47
+ tokenizer.return_token_type_ids = False
48
+ super().__init__(image_processor, tokenizer)
49
+ self.current_processor = self.image_processor
50
+
51
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.__call__
52
+ def __call__(
53
+ self,
54
+ images: ImageInput = None,
55
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
56
+ add_special_tokens: bool = True,
57
+ padding: Union[bool, str, PaddingStrategy] = False,
58
+ truncation: Union[bool, str, TruncationStrategy] = None,
59
+ max_length: Optional[int] = None,
60
+ stride: int = 0,
61
+ pad_to_multiple_of: Optional[int] = None,
62
+ return_attention_mask: Optional[bool] = None,
63
+ return_overflowing_tokens: bool = False,
64
+ return_special_tokens_mask: bool = False,
65
+ return_offsets_mapping: bool = False,
66
+ return_token_type_ids: bool = False,
67
+ return_length: bool = False,
68
+ verbose: bool = True,
69
+ return_tensors: Optional[Union[str, TensorType]] = None,
70
+ **kwargs,
71
+ ) -> BatchEncoding:
72
+ """
73
+ This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
74
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
75
+
76
+ Please refer to the docstring of the above two methods for more information.
77
+ """
78
+ if images is None and text is None:
79
+ raise ValueError("You have to specify either images or text.")
80
+
81
+ # Get only text
82
+ if images is None:
83
+ self.current_processor = self.tokenizer
84
+ text_encoding = self.tokenizer(
85
+ text=text,
86
+ add_special_tokens=add_special_tokens,
87
+ padding=padding,
88
+ truncation=truncation,
89
+ max_length=max_length,
90
+ stride=stride,
91
+ pad_to_multiple_of=pad_to_multiple_of,
92
+ return_attention_mask=return_attention_mask,
93
+ return_overflowing_tokens=return_overflowing_tokens,
94
+ return_special_tokens_mask=return_special_tokens_mask,
95
+ return_offsets_mapping=return_offsets_mapping,
96
+ return_token_type_ids=return_token_type_ids,
97
+ return_length=return_length,
98
+ verbose=verbose,
99
+ return_tensors=return_tensors,
100
+ **kwargs,
101
+ )
102
+ return text_encoding
103
+
104
+ # add pixel_values
105
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
106
+
107
+ if text is not None:
108
+ text_encoding = self.tokenizer(
109
+ text=text,
110
+ add_special_tokens=add_special_tokens,
111
+ padding=padding,
112
+ truncation=truncation,
113
+ max_length=max_length,
114
+ stride=stride,
115
+ pad_to_multiple_of=pad_to_multiple_of,
116
+ return_attention_mask=return_attention_mask,
117
+ return_overflowing_tokens=return_overflowing_tokens,
118
+ return_special_tokens_mask=return_special_tokens_mask,
119
+ return_offsets_mapping=return_offsets_mapping,
120
+ return_token_type_ids=return_token_type_ids,
121
+ return_length=return_length,
122
+ verbose=verbose,
123
+ return_tensors=return_tensors,
124
+ **kwargs,
125
+ )
126
+ else:
127
+ text_encoding = None
128
+
129
+ if text_encoding is not None:
130
+ encoding_image_processor.update(text_encoding)
131
+
132
+ return encoding_image_processor
133
+
134
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
135
+ def batch_decode(self, *args, **kwargs):
136
+ """
137
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
138
+ refer to the docstring of this method for more information.
139
+ """
140
+ return self.tokenizer.batch_decode(*args, **kwargs)
141
+
142
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
143
+ def decode(self, *args, **kwargs):
144
+ """
145
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
146
+ the docstring of this method for more information.
147
+ """
148
+ return self.tokenizer.decode(*args, **kwargs)
149
+
150
+ @property
151
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
152
+ def model_input_names(self):
153
+ tokenizer_input_names = self.tokenizer.model_input_names
154
+ image_processor_input_names = self.image_processor.model_input_names
155
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
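
A short sketch (not part of the diff) of the two branches in `Blip2Processor.__call__`, text-only versus image plus text; the checkpoint name is only an assumption used to load a compatible tokenizer and image processor:

```python
from PIL import Image

from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
image = Image.new("RGB", (224, 224))

# image + text: pixel_values come from the image processor, input_ids/attention_mask from the tokenizer
batch = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']

# text only: the call is forwarded to the tokenizer and a plain text encoding is returned
text_only = processor(text="a photo of", return_tensors="pt")
print(sorted(text_only.keys()))  # ['attention_mask', 'input_ids']
```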
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc ADDED
Binary file (81.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__init__.py ADDED
@@ -0,0 +1,86 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_pix2struct": [
21
+ "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "Pix2StructConfig",
23
+ "Pix2StructTextConfig",
24
+ "Pix2StructVisionConfig",
25
+ ],
26
+ "processing_pix2struct": ["Pix2StructProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
36
+
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_pix2struct"] = [
45
+ "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "Pix2StructPreTrainedModel",
47
+ "Pix2StructForConditionalGeneration",
48
+ "Pix2StructVisionModel",
49
+ "Pix2StructTextModel",
50
+ ]
51
+
52
+ if TYPE_CHECKING:
53
+ from .configuration_pix2struct import (
54
+ PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
55
+ Pix2StructConfig,
56
+ Pix2StructTextConfig,
57
+ Pix2StructVisionConfig,
58
+ )
59
+ from .processing_pix2struct import Pix2StructProcessor
60
+
61
+ try:
62
+ if not is_vision_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ from .image_processing_pix2struct import Pix2StructImageProcessor
68
+
69
+ try:
70
+ if not is_torch_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .modeling_pix2struct import (
76
+ PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
77
+ Pix2StructForConditionalGeneration,
78
+ Pix2StructPreTrainedModel,
79
+ Pix2StructTextModel,
80
+ Pix2StructVisionModel,
81
+ )
82
+
83
+ else:
84
+ import sys
85
+
86
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
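
As a rough illustration (not part of the diff, and assuming torch and vision are installed), the `_LazyModule` indirection above means the heavy submodules are only imported on first attribute access:

```python
import transformers.models.pix2struct as pix2struct_module

# The names are declared in `_import_structure`, but the submodules load lazily on access.
config_cls = pix2struct_module.Pix2StructConfig                    # imports configuration_pix2struct
model_cls = pix2struct_module.Pix2StructForConditionalGeneration   # imports modeling_pix2struct
print(config_cls.model_type)  # "pix2struct"
```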
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/configuration_pix2struct.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/image_processing_pix2struct.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/modeling_pix2struct.cpython-310.pyc ADDED
Binary file (52.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/__pycache__/processing_pix2struct.cpython-310.pyc ADDED
Binary file (4.73 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/configuration_pix2struct.py ADDED
@@ -0,0 +1,387 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class Pix2StructTextConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate
33
+ a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a
34
+ configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by
35
+ the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50244):
42
+ Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be
43
+ represented by the `input_ids` passed when calling [`Pix2StructTextModel`].
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer.
46
+ d_kv (`int`, *optional*, defaults to 64):
47
+ Dimensionality of the key, query, value projections in each attention head.
48
+ d_ff (`int`, *optional*, defaults to 2048):
49
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ num_layers (`int`, *optional*, defaults to 12):
51
+ Number of hidden layers in the Transformer encoder.
52
+ num_heads (`int`, *optional*, defaults to 12):
53
+ Number of attention heads for each attention layer in the Transformer encoder.
54
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
55
+ The number of buckets to use for each attention layer.
56
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
57
+ The maximum distance of the longer sequences for the bucket separation.
58
+ dropout_rate (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
61
+ The epsilon used by the layer normalization layers.
62
+ initializer_factor (`float`, *optional*, defaults to 1.0):
63
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
64
+ testing).
65
+ dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`):
66
+ The non-linear activation function (function or string).
67
+ decoder_start_token_id (`int`, *optional*, defaults to 0):
68
+ The id of the `decoder_start_token_id` token.
69
+ use_cache (`bool`, *optional*, defaults to `False`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+ pad_token_id (`int`, *optional*, defaults to 0):
72
+ The id of the `padding` token.
73
+ eos_token_id (`int`, *optional*, defaults to 1):
74
+ The id of the `end-of-sequence` token.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel
80
+
81
+ >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration
82
+ >>> configuration = Pix2StructTextConfig()
83
+
84
+ >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration
85
+ >>> model = Pix2StructTextModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "pix2struct_text_model"
92
+ keys_to_ignore_at_inference = ["past_key_values"]
93
+ attribute_map = {
94
+ "hidden_size": "hidden_size",
95
+ "num_attention_heads": "num_heads",
96
+ "num_hidden_layers": "num_layers",
97
+ }
98
+
99
+ def __init__(
100
+ self,
101
+ vocab_size=50244,
102
+ hidden_size=768,
103
+ d_kv=64,
104
+ d_ff=2048,
105
+ num_layers=12,
106
+ num_heads=12,
107
+ relative_attention_num_buckets=32,
108
+ relative_attention_max_distance=128,
109
+ dropout_rate=0.1,
110
+ layer_norm_epsilon=1e-6,
111
+ initializer_factor=1.0,
112
+ dense_act_fn="gelu_new",
113
+ decoder_start_token_id=0,
114
+ use_cache=False,
115
+ pad_token_id=0,
116
+ eos_token_id=1,
117
+ tie_word_embeddings=False,
118
+ is_decoder=True,
119
+ **kwargs,
120
+ ):
121
+ self.vocab_size = vocab_size
122
+ self.hidden_size = hidden_size
123
+ self.d_kv = d_kv
124
+ self.d_ff = d_ff
125
+ self.num_layers = num_layers
126
+ self.num_heads = num_heads
127
+ self.relative_attention_num_buckets = relative_attention_num_buckets
128
+ self.relative_attention_max_distance = relative_attention_max_distance
129
+ self.dropout_rate = dropout_rate
130
+ self.layer_norm_epsilon = layer_norm_epsilon
131
+ self.initializer_factor = initializer_factor
132
+ self.use_cache = use_cache
133
+
134
+ self.eos_token_id = eos_token_id
135
+ self.decoder_start_token_id = decoder_start_token_id
136
+
137
+ # for backwards compatibility
138
+ self.dense_act_fn = dense_act_fn
139
+
140
+ super().__init__(
141
+ pad_token_id=pad_token_id,
142
+ eos_token_id=eos_token_id,
143
+ decoder_start_token_id=decoder_start_token_id,
144
+ tie_word_embeddings=tie_word_embeddings,
145
+ is_decoder=is_decoder,
146
+ **kwargs,
147
+ )
148
+
149
+ @classmethod
150
+ def from_pretrained(
151
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
152
+ ) -> "PretrainedConfig":
153
+ cls._set_token_in_kwargs(kwargs)
154
+
155
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
156
+
157
+ # get the text config dict if we are loading from Pix2StructConfig
158
+ if config_dict.get("model_type") == "pix2struct":
159
+ config_dict = config_dict["text_config"]
160
+
161
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
162
+ logger.warning(
163
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
164
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
165
+ )
166
+
167
+ return cls.from_dict(config_dict, **kwargs)
168
+
169
+
170
+ class Pix2StructVisionConfig(PretrainedConfig):
171
+ r"""
172
+ This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to
173
+ instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture.
174
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base
175
+ [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
176
+
177
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
178
+ documentation from [`PretrainedConfig`] for more information.
179
+
180
+ Args:
181
+ hidden_size (`int`, *optional*, defaults to 768):
182
+ Dimensionality of the encoder layers and the pooler layer.
183
+ patch_embed_hidden_size (`int`, *optional*, defaults to 768):
184
+ Dimensionality of the input patch_embedding layer in the Transformer encoder.
185
+ d_ff (`int`, *optional*, defaults to 2048):
186
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
187
+ d_kv (`int`, *optional*, defaults to 64):
188
+ Dimensionality of the key, query, value projections per attention head.
189
+ num_hidden_layers (`int`, *optional*, defaults to 12):
190
+ Number of hidden layers in the Transformer encoder.
191
+ num_attention_heads (`int`, *optional*, defaults to 12):
192
+ Number of attention heads for each attention layer in the Transformer encoder.
193
+ dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
194
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
195
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
196
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
197
+ The epsilon used by the layer normalization layers.
198
+ dropout_rate (`float`, *optional*, defaults to 0.0):
199
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
200
+ attention_dropout (`float`, *optional*, defaults to 0.0):
201
+ The dropout ratio for the attention probabilities.
202
+ initializer_range (`float`, *optional*, defaults to 1e-10):
203
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
204
+ initializer_factor (`float`, *optional*, defaults to 1.0):
205
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
206
+ testing).
207
+ seq_len (`int`, *optional*, defaults to 4096):
208
+ Maximum sequence length (here number of patches) supported by the model.
209
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
210
+ The number of buckets to use for each attention layer.
211
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
212
+ The maximum distance (in tokens) to use for each attention layer.
213
+
214
+ Example:
215
+
216
+ ```python
217
+ >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel
218
+
219
+ >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration
220
+ >>> configuration = Pix2StructVisionConfig()
221
+
222
+ >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration
223
+ >>> model = Pix2StructVisionModel(configuration)
224
+
225
+ >>> # Accessing the model configuration
226
+ >>> configuration = model.config
227
+ ```"""
228
+
229
+ model_type = "pix2struct_vision_model"
230
+
231
+ def __init__(
232
+ self,
233
+ hidden_size=768,
234
+ patch_embed_hidden_size=768,
235
+ d_ff=2048,
236
+ d_kv=64,
237
+ num_hidden_layers=12,
238
+ num_attention_heads=12,
239
+ dense_act_fn="gelu_new",
240
+ layer_norm_eps=1e-6,
241
+ dropout_rate=0.0,
242
+ attention_dropout=0.0,
243
+ initializer_range=1e-10,
244
+ initializer_factor=1.0,
245
+ seq_len=4096,
246
+ relative_attention_num_buckets=32,
247
+ relative_attention_max_distance=128,
248
+ **kwargs,
249
+ ):
250
+ super().__init__(**kwargs)
251
+
252
+ self.hidden_size = hidden_size
253
+ self.patch_embed_hidden_size = patch_embed_hidden_size
254
+ self.d_ff = d_ff
255
+ self.dropout_rate = dropout_rate
256
+ self.num_hidden_layers = num_hidden_layers
257
+ self.num_attention_heads = num_attention_heads
258
+ self.initializer_range = initializer_range
259
+ self.initializer_factor = initializer_factor
260
+ self.attention_dropout = attention_dropout
261
+ self.layer_norm_eps = layer_norm_eps
262
+ self.dense_act_fn = dense_act_fn
263
+ self.seq_len = seq_len
264
+ self.relative_attention_num_buckets = relative_attention_num_buckets
265
+ self.relative_attention_max_distance = relative_attention_max_distance
266
+ self.d_kv = d_kv
267
+
268
+ @classmethod
269
+ def from_pretrained(
270
+ cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
271
+ ) -> "PretrainedConfig":
272
+ cls._set_token_in_kwargs(kwargs)
273
+
274
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
275
+
276
+ # get the vision config dict if we are loading from Pix2StructConfig
277
+ if config_dict.get("model_type") == "pix2struct":
278
+ config_dict = config_dict["vision_config"]
279
+
280
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
281
+ logger.warning(
282
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
283
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
284
+ )
285
+
286
+ return cls.from_dict(config_dict, **kwargs)
287
+
288
+
289
+ class Pix2StructConfig(PretrainedConfig):
290
+ r"""
291
+ [`Pix2StructConfig`] is the configuration class to store the configuration of a
292
+ [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified
293
+ arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will
294
+ yield a similar configuration to that of the Pix2Struct-base
295
+ [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.
296
+
297
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
298
+ documentation from [`PretrainedConfig`] for more information.
299
+
300
+ Args:
301
+ text_config (`dict`, *optional*):
302
+ Dictionary of configuration options used to initialize [`Pix2StructTextConfig`].
303
+ vision_config (`dict`, *optional*):
304
+ Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`].
305
+ initializer_factor (`float`, *optional*, defaults to 1.0):
306
+ Factor to multiply the initialization range with.
307
+ initializer_range (`float`, *optional*, defaults to 0.02):
308
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
309
+ is_vqa (`bool`, *optional*, defaults to `False`):
310
+ Whether the model has been fine-tuned for VQA or not.
311
+ kwargs (*optional*):
312
+ Dictionary of keyword arguments.
313
+
314
+ Example:
315
+
316
+ ```python
317
+ >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration
318
+
319
+ >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration
320
+ >>> configuration = Pix2StructConfig()
321
+
322
+ >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration
323
+ >>> model = Pix2StructForConditionalGeneration(configuration)
324
+
325
+ >>> # Accessing the model configuration
326
+ >>> configuration = model.config
327
+
328
+ >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig
329
+
330
+ >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration
331
+ >>> config_text = Pix2StructTextConfig()
332
+ >>> config_vision = Pix2StructVisionConfig()
333
+
334
+ >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision)
335
+ ```"""
336
+
337
+ model_type = "pix2struct"
338
+
339
+ def __init__(
340
+ self,
341
+ text_config=None,
342
+ vision_config=None,
343
+ initializer_factor=1.0,
344
+ initializer_range=0.02,
345
+ is_vqa=False,
346
+ tie_word_embeddings=False,
347
+ is_encoder_decoder=True,
348
+ **kwargs,
349
+ ):
350
+ super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
351
+
352
+ if text_config is None:
353
+ text_config = {}
354
+ logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
355
+
356
+ if vision_config is None:
357
+ vision_config = {}
358
+ logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
359
+
360
+ self.text_config = Pix2StructTextConfig(**text_config)
361
+ self.vision_config = Pix2StructVisionConfig(**vision_config)
362
+
363
+ self.decoder_start_token_id = self.text_config.decoder_start_token_id
364
+ self.pad_token_id = self.text_config.pad_token_id
365
+ self.eos_token_id = self.text_config.eos_token_id
366
+
367
+ self.initializer_factor = initializer_factor
368
+ self.initializer_range = initializer_range
369
+
370
+ self.text_config.initializer_range = self.initializer_range
371
+ self.vision_config.initializer_range = self.initializer_range
372
+
373
+ self.is_vqa = is_vqa
374
+
375
+ @classmethod
376
+ def from_text_vision_configs(
377
+ cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
378
+ ):
379
+ r"""
380
+ Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct
381
+ vision model configuration.
382
+
383
+ Returns:
384
+ [`Pix2StructConfig`]: An instance of a configuration object
385
+ """
386
+
387
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
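
A small sketch (not part of the diff) of composing the two sub-configs, mirroring the docstring example above; the tiny layer/head counts are illustrative only:

```python
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=4, hidden_size=128, d_ff=256)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=4, hidden_size=128, d_ff=256)

config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config, is_vqa=False)

# The composite config copies the special token ids from the text config ...
assert config.pad_token_id == text_config.pad_token_id
assert config.decoder_start_token_id == text_config.decoder_start_token_id
# ... and pushes its own initializer_range down into both sub-configs.
assert config.text_config.initializer_range == config.initializer_range
assert config.vision_config.initializer_range == config.initializer_range
```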
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/convert_pix2struct_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,155 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import argparse
16
+ import os
17
+ import re
18
+
19
+ import torch
20
+ from flax.traverse_util import flatten_dict
21
+ from t5x import checkpoints
22
+
23
+ from transformers import (
24
+ AutoTokenizer,
25
+ Pix2StructConfig,
26
+ Pix2StructForConditionalGeneration,
27
+ Pix2StructImageProcessor,
28
+ Pix2StructProcessor,
29
+ Pix2StructTextConfig,
30
+ Pix2StructVisionConfig,
31
+ )
32
+
33
+
34
+ def get_flax_param(t5x_checkpoint_path):
35
+ flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
36
+ flax_params = flatten_dict(flax_params)
37
+ return flax_params
38
+
39
+
40
+ def rename_and_convert_flax_params(flax_dict):
41
+ converted_dict = {}
42
+
43
+ CONVERSION_MAPPING = {
44
+ "token_embedder": "embeddings",
45
+ "encoder_norm": "layernorm",
46
+ "kernel": "weight",
47
+ ".out": ".output",
48
+ "scale": "weight",
49
+ "embedders_0.pos_embedding": "row_embedder.weight",
50
+ "embedders_1.pos_embedding": "column_embedder.weight",
51
+ }
52
+
53
+ DECODER_CONVERSION_MAPPING = {
54
+ "query": "attention.query",
55
+ "key": "attention.key",
56
+ "value": "attention.value",
57
+ "output.dense": "output",
58
+ "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
59
+ "pre_self_attention_layer_norm": "self_attention.layer_norm",
60
+ "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
61
+ "mlp.": "mlp.DenseReluDense.",
62
+ "pre_mlp_layer_norm": "mlp.layer_norm",
63
+ "self_attention.o": "self_attention.attention.o",
64
+ "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
65
+ "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
66
+ "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
67
+ "decoder.logits_dense.weight": "decoder.lm_head.weight",
68
+ }
69
+
70
+ for key in flax_dict.keys():
71
+ if "target" in key:
72
+ # remove the first prefix from the key
73
+ new_key = ".".join(key[1:])
74
+
75
+ # rename the key
76
+ for old, new in CONVERSION_MAPPING.items():
77
+ new_key = new_key.replace(old, new)
78
+
79
+ if "decoder" in new_key:
80
+ for old, new in DECODER_CONVERSION_MAPPING.items():
81
+ new_key = new_key.replace(old, new)
82
+
83
+ if "layers" in new_key and "decoder" not in new_key:
84
+ # use regex to replace the layer number
85
+ new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
86
+ new_key = new_key.replace("encoder", "encoder.encoder")
87
+
88
+ elif "layers" in new_key and "decoder" in new_key:
89
+ # use regex to replace the layer number
90
+ new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
91
+
92
+ converted_dict[new_key] = flax_dict[key]
93
+
94
+ converted_torch_dict = {}
95
+ # convert converted_dict into torch format
96
+ for key in converted_dict.keys():
97
+ if ("embed_tokens" not in key) and ("embedder" not in key):
98
+ converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
99
+ else:
100
+ converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
101
+
102
+ return converted_torch_dict
103
+
104
+
105
+ def convert_pix2struct_original_pytorch_checkpoint_to_hf(
106
+ t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
107
+ ):
108
+ flax_params = get_flax_param(t5x_checkpoint_path)
109
+
110
+ if not use_large:
111
+ encoder_config = Pix2StructVisionConfig()
112
+ decoder_config = Pix2StructTextConfig()
113
+ else:
114
+ encoder_config = Pix2StructVisionConfig(
115
+ hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
116
+ )
117
+ decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
118
+ config = Pix2StructConfig(
119
+ vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
120
+ )
121
+
122
+ model = Pix2StructForConditionalGeneration(config)
123
+
124
+ torch_params = rename_and_convert_flax_params(flax_params)
125
+ model.load_state_dict(torch_params)
126
+
127
+ tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
128
+ image_processor = Pix2StructImageProcessor()
129
+ processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)
130
+
131
+ if use_large:
132
+ processor.image_processor.max_patches = 4096
133
+
134
+ processor.image_processor.is_vqa = True
135
+
136
+ # mkdir if needed
137
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
138
+
139
+ model.save_pretrained(pytorch_dump_folder_path)
140
+ processor.save_pretrained(pytorch_dump_folder_path)
141
+
142
+ print("Model saved in {}".format(pytorch_dump_folder_path))
143
+
144
+
145
+ if __name__ == "__main__":
146
+ parser = argparse.ArgumentParser()
147
+ parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
148
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
149
+ parser.add_argument("--use_large", action="store_true", help="Use large model.")
150
+ parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
151
+ args = parser.parse_args()
152
+
153
+ convert_pix2struct_original_pytorch_checkpoint_to_hf(
154
+ args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
155
+ )
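
A tiny standalone illustration (not part of the script) of the encoder-side key renaming performed in `rename_and_convert_flax_params`; the example key is made up:

```python
import re

# Mirror the encoder branch: replace the T5x layer index, then nest under the HF encoder module.
new_key = "encoder.layers_3.attention.output.weight"
new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)   # layers_3 -> layer.3
new_key = new_key.replace("encoder", "encoder.encoder")   # encoder -> encoder.encoder
print(new_key)  # encoder.encoder.layer.3.attention.output.weight
```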
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/image_processing_pix2struct.py ADDED
@@ -0,0 +1,460 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Pix2Struct."""
16
+ import io
17
+ import math
18
+ from typing import Dict, Optional, Union
19
+
20
+ import numpy as np
21
+ from huggingface_hub import hf_hub_download
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
24
+ from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
25
+ from ...image_utils import (
26
+ ChannelDimension,
27
+ ImageInput,
28
+ get_image_size,
29
+ infer_channel_dimension_format,
30
+ make_list_of_images,
31
+ to_numpy_array,
32
+ valid_images,
33
+ )
34
+ from ...utils import TensorType, is_torch_available, is_vision_available, logging
35
+ from ...utils.import_utils import requires_backends
36
+
37
+
38
+ if is_vision_available():
39
+ import textwrap
40
+
41
+ from PIL import Image, ImageDraw, ImageFont
42
+
43
+ if is_torch_available():
44
+ import torch
45
+
46
+ logger = logging.get_logger(__name__)
47
+ DEFAULT_FONT_PATH = "ybelkada/fonts"
48
+
49
+
50
+ # adapted from: https://discuss.pytorch.org/t/tf-image-extract-patches-in-pytorch/171409/2
51
+ def torch_extract_patches(image_tensor, patch_height, patch_width):
52
+ """
53
+ Utility function to extract patches from a given image tensor. Returns a tensor of shape (1, `rows`, `columns`,
54
+ `num_channels` x `patch_height` x `patch_width`).
55
+
56
+ Args:
57
+ image_tensor (torch.Tensor):
58
+ The image tensor to extract patches from.
59
+ patch_height (int):
60
+ The height of the patches to extract.
61
+ patch_width (int):
62
+ The width of the patches to extract.
63
+ """
64
+ requires_backends(torch_extract_patches, ["torch"])
65
+
66
+ image_tensor = image_tensor.unsqueeze(0)
67
+ patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
68
+ patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
69
+ patches = patches.permute(0, 4, 2, 3, 1).reshape(
70
+ image_tensor.size(2) // patch_height,
71
+ image_tensor.size(3) // patch_width,
72
+ image_tensor.size(1) * patch_height * patch_width,
73
+ )
74
+ return patches.unsqueeze(0)
75
+
76
+
77
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L106
78
+ def render_text(
79
+ text: str,
80
+ text_size: int = 36,
81
+ text_color: str = "black",
82
+ background_color: str = "white",
83
+ left_padding: int = 5,
84
+ right_padding: int = 5,
85
+ top_padding: int = 5,
86
+ bottom_padding: int = 5,
87
+ font_bytes: Optional[bytes] = None,
88
+ font_path: Optional[str] = None,
89
+ ) -> Image.Image:
90
+ """
91
+ Render text. This script is entirely adapted from the original script that can be found here:
92
+ https://github.com/google-research/pix2struct/blob/main/pix2struct/preprocessing/preprocessing_utils.py
93
+
94
+ Args:
95
+ text (`str`):
96
+ Text to render.
97
+ text_size (`int`, *optional*, defaults to 36):
98
+ Size of the text.
99
+ text_color (`str`, *optional*, defaults to `"black"`):
100
+ Color of the text.
101
+ background_color (`str`, *optional*, defaults to `"white"`):
102
+ Color of the background.
103
+ left_padding (`int`, *optional*, defaults to 5):
104
+ Padding on the left.
105
+ right_padding (`int`, *optional*, defaults to 5):
106
+ Padding on the right.
107
+ top_padding (`int`, *optional*, defaults to 5):
108
+ Padding on the top.
109
+ bottom_padding (`int`, *optional*, defaults to 5):
110
+ Padding on the bottom.
111
+ font_bytes (`bytes`, *optional*):
112
+ Bytes of the font to use. If `None`, the default font will be used.
113
+ font_path (`str`, *optional*):
114
+ Path to the font to use. If `None`, the default font will be used.
115
+ """
116
+ requires_backends(render_text, "vision")
117
+ # Add new lines so that each line is no more than 80 characters.
118
+
119
+ wrapper = textwrap.TextWrapper(width=80)
120
+ lines = wrapper.wrap(text=text)
121
+ wrapped_text = "\n".join(lines)
122
+
123
+ if font_bytes is not None and font_path is None:
124
+ font = io.BytesIO(font_bytes)
125
+ elif font_path is not None:
126
+ font = font_path
127
+ else:
128
+ font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
129
+ font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
130
+
131
+ # Use a temporary canvas to determine the width and height in pixels when
132
+ # rendering the text.
133
+ temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
134
+ _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
135
+
136
+ # Create the actual image with a bit of padding around the text.
137
+ image_width = text_width + left_padding + right_padding
138
+ image_height = text_height + top_padding + bottom_padding
139
+ image = Image.new("RGB", (image_width, image_height), background_color)
140
+ draw = ImageDraw.Draw(image)
141
+ draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
142
+ return image
143
+
144
+
145
+ # Adapted from https://github.com/google-research/pix2struct/blob/0e1779af0f4db4b652c1d92b3bbd2550a7399123/pix2struct/preprocessing/preprocessing_utils.py#L87
146
+ def render_header(
147
+ image: np.ndarray, header: str, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
148
+ ):
149
+ """
150
+ Renders the input text as a header on the input image.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ The image to render the header on.
155
+ header (`str`):
156
+ The header text.
157
+ input_data_format (`Union[ChannelDimension, str]`, *optional*):
158
+ The channel dimension format of the input image. Can be either `"channels_first"` or
159
+ `"channels_last"`. If unset, it is inferred from the input image.
160
+
161
+ Returns:
162
+ `np.ndarray`: The image with the header rendered.
163
+ """
164
+ requires_backends(render_header, "vision")
165
+
166
+ # Convert to PIL image if necessary
167
+ image = to_pil_image(image, input_data_format=input_data_format)
168
+
169
+ header_image = render_text(header, **kwargs)
170
+ new_width = max(header_image.width, image.width)
171
+
172
+ new_height = int(image.height * (new_width / image.width))
173
+ new_header_height = int(header_image.height * (new_width / header_image.width))
174
+
175
+ new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
176
+ new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
177
+ new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
178
+
179
+ # Convert back to the original framework if necessary
180
+ new_image = to_numpy_array(new_image)
181
+
182
+ if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
183
+ new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
184
+
185
+ return new_image
186
+
187
+
188
+ class Pix2StructImageProcessor(BaseImageProcessor):
189
+ r"""
190
+ Constructs a Pix2Struct image processor.
191
+
192
+ Args:
193
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
194
+ Whether to convert the image to RGB.
195
+ do_normalize (`bool`, *optional*, defaults to `True`):
196
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
197
+ method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard
198
+ deviation.
199
+ patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
200
+ The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16.
201
+ max_patches (`int`, *optional*, defaults to 2048):
202
+ The maximum number of patches to extract from the image as per the [Pix2Struct
203
+ paper](https://arxiv.org/pdf/2210.03347.pdf).
204
+ is_vqa (`bool`, *optional*, defaults to `False`):
205
+ Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is
206
+ rendered onto the input images.
207
+ """
208
+
209
+ model_input_names = ["flattened_patches"]
210
+
211
+ def __init__(
212
+ self,
213
+ do_convert_rgb: bool = True,
214
+ do_normalize: bool = True,
215
+ patch_size: Dict[str, int] = None,
216
+ max_patches: int = 2048,
217
+ is_vqa: bool = False,
218
+ **kwargs,
219
+ ) -> None:
220
+ super().__init__(**kwargs)
221
+ self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
222
+ self.do_normalize = do_normalize
223
+ self.do_convert_rgb = do_convert_rgb
224
+ self.max_patches = max_patches
225
+ self.is_vqa = is_vqa
226
+
227
+ def extract_flattened_patches(
228
+ self,
229
+ image: np.ndarray,
230
+ max_patches: int,
231
+ patch_size: dict,
232
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
233
+ **kwargs,
234
+ ) -> np.ndarray:
235
+ """
236
+ Extract flattened patches from an image.
237
+
238
+ Args:
239
+ image (`np.ndarray`):
240
+ Image to extract flattened patches from.
241
+ max_patches (`int`):
242
+ Maximum number of patches to extract.
243
+ patch_size (`dict`):
244
+ Dictionary containing the patch height and width.
245
+
246
+ Returns:
247
+ result (`np.ndarray`):
248
+ A sequence of `max_patches` flattened patches.
249
+ """
250
+ requires_backends(self.extract_flattened_patches, "torch")
251
+
252
+ # convert to torch
253
+ image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
254
+ image = torch.from_numpy(image)
255
+
256
+ patch_height, patch_width = patch_size["height"], patch_size["width"]
257
+ image_height, image_width = get_image_size(image, ChannelDimension.FIRST)
258
+
259
+ # maximize scale such that the resized image yields at most `max_patches` patches
260
+ scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
261
+ num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
262
+ num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
263
+ resized_height = max(num_feasible_rows * patch_height, 1)
264
+ resized_width = max(num_feasible_cols * patch_width, 1)
265
+
266
+ image = torch.nn.functional.interpolate(
267
+ image.unsqueeze(0),
268
+ size=(resized_height, resized_width),
269
+ mode="bilinear",
270
+ align_corners=False,
271
+ antialias=True,
272
+ ).squeeze(0)
273
+
274
+ # [1, rows, columns, patch_height * patch_width * image_channels]
275
+ patches = torch_extract_patches(image, patch_height, patch_width)
276
+
277
+ patches_shape = patches.shape
278
+ rows = patches_shape[1]
279
+ columns = patches_shape[2]
280
+ depth = patches_shape[3]
281
+
282
+ # [rows * columns, patch_height * patch_width * image_channels]
283
+ patches = patches.reshape([rows * columns, depth])
284
+
285
+ # [rows * columns, 1]
286
+ row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
287
+ col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
288
+
289
+ # Offset by 1 so the ids do not contain zeros, which represent padding.
290
+ row_ids += 1
291
+ col_ids += 1
292
+
293
+ # Prepare additional patch features.
294
+ # [rows * columns, 1]
295
+ row_ids = row_ids.to(torch.float32)
296
+ col_ids = col_ids.to(torch.float32)
297
+
298
+ # [rows * columns, 2 + patch_height * patch_width * image_channels]
299
+ result = torch.cat([row_ids, col_ids, patches], -1)
300
+
301
+ # [max_patches, 2 + patch_height * patch_width * image_channels]
302
+ result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
303
+
304
+ result = to_numpy_array(result)
305
+
306
+ return result
307
+
308
+ def normalize(
309
+ self,
310
+ image: np.ndarray,
311
+ data_format: Optional[Union[str, ChannelDimension]] = None,
312
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
313
+ **kwargs,
314
+ ) -> np.ndarray:
315
+ """
316
+ Normalize an image. image = (image - image_mean) / image_std.
317
+
318
+ The image std is to mimic the tensorflow implementation of the `per_image_standardization`:
319
+ https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization
320
+
321
+ Args:
322
+ image (`np.ndarray`):
323
+ Image to normalize.
324
+ data_format (`str` or `ChannelDimension`, *optional*):
325
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
326
+ image is used.
327
+ input_data_format (`str` or `ChannelDimension`, *optional*):
328
+ The channel dimension format of the input image. If not provided, it will be inferred.
329
+ """
330
+ if image.dtype == np.uint8:
331
+ image = image.astype(np.float32)
332
+
333
+ # take mean across the whole `image`
334
+ mean = np.mean(image)
335
+ std = np.std(image)
336
+ adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
337
+
338
+ return normalize(
339
+ image,
340
+ mean=mean,
341
+ std=adjusted_stddev,
342
+ data_format=data_format,
343
+ input_data_format=input_data_format,
344
+ **kwargs,
345
+ )
346
+
347
+ def preprocess(
348
+ self,
349
+ images: ImageInput,
350
+ header_text: Optional[str] = None,
351
+ do_convert_rgb: bool = None,
352
+ do_normalize: Optional[bool] = None,
353
+ max_patches: Optional[int] = None,
354
+ patch_size: Optional[Dict[str, int]] = None,
355
+ return_tensors: Optional[Union[str, TensorType]] = None,
356
+ data_format: ChannelDimension = ChannelDimension.FIRST,
357
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
358
+ **kwargs,
359
+ ) -> ImageInput:
360
+ """
361
+ Preprocess an image or batch of images. The processor first computes the maximum possible number of
362
+ aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the
363
+ image with zeros to make the image respect the constraint of `max_patches`. Before extracting the patches the
364
+ images are standardized following the tensorflow implementation of `per_image_standardization`
365
+ (https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization).
366
+
367
+
368
+ Args:
369
+ images (`ImageInput`):
370
+ Image to preprocess. Expects a single or batch of images.
371
+ header_text (`Union[List[str], str]`, *optional*):
372
+ Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
373
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
374
+ Whether to convert the image to RGB.
375
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
376
+ Whether to normalize the image.
377
+ max_patches (`int`, *optional*, defaults to `self.max_patches`):
378
+ Maximum number of patches to extract.
379
+ patch_size (`dict`, *optional*, defaults to `self.patch_size`):
380
+ Dictionary containing the patch height and width.
381
+ return_tensors (`str` or `TensorType`, *optional*):
382
+ The type of tensors to return. Can be one of:
383
+ - Unset: Return a list of `np.ndarray`.
384
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
385
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
386
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
387
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
388
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
389
+ The channel dimension format for the output image. Can be one of:
390
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
391
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
392
+ - Unset: Use the channel dimension format of the input image.
393
+ input_data_format (`ChannelDimension` or `str`, *optional*):
394
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
395
+ from the input image. Can be one of:
396
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
399
+ """
400
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
401
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
402
+ patch_size = patch_size if patch_size is not None else self.patch_size
403
+ max_patches = max_patches if max_patches is not None else self.max_patches
404
+ is_vqa = self.is_vqa
405
+
406
+ if kwargs.get("data_format", None) is not None:
407
+ raise ValueError("data_format is not an accepted input as the outputs are ")
408
+
409
+ images = make_list_of_images(images)
410
+
411
+ if not valid_images(images):
412
+ raise ValueError(
413
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
414
+ "torch.Tensor, tf.Tensor or jax.ndarray."
415
+ )
416
+
417
+ # PIL RGBA images are converted to RGB
418
+ if do_convert_rgb:
419
+ images = [convert_to_rgb(image) for image in images]
420
+
421
+ # All transformations expect numpy arrays.
422
+ images = [to_numpy_array(image) for image in images]
423
+
424
+ if input_data_format is None:
425
+ # We assume that all images have the same channel dimension format.
426
+ input_data_format = infer_channel_dimension_format(images[0])
427
+
428
+ if is_vqa:
429
+ if header_text is None:
430
+ raise ValueError("A header text must be provided for VQA models.")
431
+ font_bytes = kwargs.pop("font_bytes", None)
432
+ font_path = kwargs.pop("font_path", None)
433
+
434
+ if isinstance(header_text, str):
435
+ header_text = [header_text] * len(images)
436
+
437
+ images = [
438
+ render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
439
+ for i, image in enumerate(images)
440
+ ]
441
+
442
+ if do_normalize:
443
+ images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
444
+
445
+ # convert to torch tensor and permute
446
+ images = [
447
+ self.extract_flattened_patches(
448
+ image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format
449
+ )
450
+ for image in images
451
+ ]
452
+
453
+ # create attention mask in numpy
454
+ attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
455
+
456
+ encoded_outputs = BatchFeature(
457
+ data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
458
+ )
459
+
460
+ return encoded_outputs
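
To make the resizing arithmetic in `extract_flattened_patches` concrete, a small sketch (not part of the diff; assumes torch is installed, and the image size is illustrative only):

```python
import math

import numpy as np
from transformers import Pix2StructImageProcessor

# Reproduce the scale computation for a 480x640 image, 16x16 patches, max_patches=2048.
image_height, image_width, (patch_height, patch_width), max_patches = 480, 640, (16, 16), 2048
scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
print(rows, cols, rows * cols)  # 39 52 2028 -> 2028 real patches, the rest is zero padding

processor = Pix2StructImageProcessor(max_patches=max_patches)
outputs = processor(images=np.zeros((image_height, image_width, 3), dtype=np.uint8), return_tensors="np")
print(outputs["flattened_patches"].shape)  # (1, 2048, 2 + 16 * 16 * 3): row id, col id, pixel values
print(outputs["attention_mask"].shape)     # (1, 2048)
```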
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/modeling_pix2struct.py ADDED
@@ -0,0 +1,1786 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. & Google team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Pix2Struct modeling file"""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutput,
27
+ BaseModelOutputWithPooling,
28
+ CausalLMOutputWithCrossAttentions,
29
+ Seq2SeqLMOutput,
30
+ Seq2SeqModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
34
+ from ...utils import (
35
+ DUMMY_INPUTS,
36
+ DUMMY_MASK,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_torch_fx_proxy,
40
+ logging,
41
+ replace_return_docstrings,
42
+ )
43
+ from .configuration_pix2struct import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ # General docstring
49
+ _CONFIG_FOR_DOC = "Pix2StructConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # Adapted from transformers.models.t5.modeling_t5.T5LayerNorm with T5->Pix2Struct
56
+ class Pix2StructLayerNorm(nn.Module):
57
+ def __init__(self, hidden_size, eps=1e-6):
58
+ """
59
+ Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
60
+ """
61
+ super().__init__()
62
+ self.weight = nn.Parameter(torch.ones(hidden_size))
63
+ self.variance_epsilon = eps
64
+
65
+ def forward(self, hidden_states):
66
+ # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
67
+ # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
68
+ # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
69
+ # half-precision inputs is done in fp32
70
+
71
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
72
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
73
+
74
+ # convert into half-precision if necessary
75
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
76
+ hidden_states = hidden_states.to(self.weight.dtype)
77
+
78
+ return self.weight * hidden_states
79
+
80
+
81
+ try:
82
+ from apex.normalization import FusedRMSNorm
83
+
84
+ Pix2StructLayerNorm = FusedRMSNorm # noqa
85
+
86
+ logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of Pix2StructLayerNorm")
87
+ except ImportError:
88
+ # using the normal Pix2StructLayerNorm
89
+ pass
90
+ except Exception:
91
+ logger.warning("Discovered apex but it failed to load, falling back to Pix2StructLayerNorm")
92
+ pass
93
+
94
+ ALL_LAYERNORM_LAYERS.append(Pix2StructLayerNorm)
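
As the comments in `forward` note, this is an RMS-style norm: activations are rescaled by the reciprocal root mean square computed in fp32, with no mean subtraction and no bias. A minimal sketch of the same computation, independent of the module above (shapes are illustrative):

# Minimal RMS-norm sketch mirroring Pix2StructLayerNorm.forward; not the module itself.
import torch

def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # variance is the mean of squares (no mean subtraction), accumulated in fp32
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    # cast back only when the learned scale is kept in half precision
    if weight.dtype in (torch.float16, torch.bfloat16):
        hidden_states = hidden_states.to(weight.dtype)
    return weight * hidden_states

x = torch.randn(2, 5, 8)
print(rms_norm(x, torch.ones(8)).shape)  # torch.Size([2, 5, 8])
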
95
+
96
+
97
+ class Pix2StructVisionEmbeddings(nn.Module):
98
+ r"""
99
+ Construct the embeddings from patches. In `Pix2Struct` the input is different from classic Vision-Transformer models.
100
+ Here the input is a sequence of `seq_len` flattened patches that also combines padding patches (tokens). Each patch
101
+ is represented by a vector of `hidden_size` values.
102
+ """
103
+
104
+ def __init__(self, config: Pix2StructConfig) -> None:
105
+ super().__init__()
106
+ self.patch_projection = nn.Linear(config.patch_embed_hidden_size, config.hidden_size)
107
+
108
+ self.row_embedder = nn.Embedding(config.seq_len, config.hidden_size)
109
+ self.column_embedder = nn.Embedding(config.seq_len, config.hidden_size)
110
+
111
+ self.dropout = nn.Dropout(config.dropout_rate)
112
+
113
+ def forward(self, flattened_patches: torch.Tensor) -> torch.Tensor:
114
+ # the row and column indices are stored in the first and second position of the flattened_patches
115
+ # flattened_patches: `batch_size`, `seq_len`, `hidden_size` + 2
116
+ row_indices = flattened_patches[:, :, 0].long()
117
+ col_indices = flattened_patches[:, :, 1].long()
118
+
119
+ flattened_patches = flattened_patches[:, :, 2:]
120
+
121
+ embeddings = self.patch_projection(flattened_patches)
122
+ row_embeddings = self.row_embedder(row_indices)
123
+ col_embeddings = self.column_embedder(col_indices)
124
+
125
+ # sum all embeddings together
126
+ embeddings = embeddings + row_embeddings + col_embeddings
127
+
128
+ embeddings = self.dropout(embeddings)
129
+
130
+ return embeddings
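
Each flattened patch therefore carries its 1-based row and column index in positions 0 and 1, followed by the raw patch pixels; the projected patch, row, and column embeddings are simply summed. A shape-level sketch with made-up sizes (not the real checkpoint's configuration):

# Shape-level sketch of the input layout Pix2StructVisionEmbeddings expects.
# All sizes below are illustrative.
import torch

batch_size, seq_len = 2, 16
patch_embed_hidden_size = 3 * 16 * 16  # num_channels * patch_h * patch_w
hidden_size = 32

flattened_patches = torch.zeros(batch_size, seq_len, 2 + patch_embed_hidden_size)
flattened_patches[:, :, 0] = torch.arange(1, seq_len + 1)  # row index (1-based)
flattened_patches[:, :, 1] = 1                             # column index
flattened_patches[:, :, 2:] = torch.randn(batch_size, seq_len, patch_embed_hidden_size)

patch_projection = torch.nn.Linear(patch_embed_hidden_size, hidden_size)
row_embedder = torch.nn.Embedding(seq_len + 1, hidden_size)
column_embedder = torch.nn.Embedding(seq_len + 1, hidden_size)

embeddings = (
    patch_projection(flattened_patches[:, :, 2:])
    + row_embedder(flattened_patches[:, :, 0].long())
    + column_embedder(flattened_patches[:, :, 1].long())
)
print(embeddings.shape)  # torch.Size([2, 16, 32])
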
131
+
132
+
133
+ class Pix2StructVisionAttention(nn.Module):
134
+ def __init__(self, config):
135
+ super().__init__()
136
+ self.hidden_size = config.hidden_size
137
+ self.key_value_proj_dim = config.d_kv
138
+ self.n_heads = config.num_attention_heads
139
+ self.dropout = config.attention_dropout
140
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
141
+
142
+ # Mesh TensorFlow initialization to avoid scaling before softmax
143
+ self.query = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
144
+ self.key = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
145
+ self.value = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
146
+ self.output = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
147
+
148
+ self.gradient_checkpointing = False
149
+
150
+ def forward(
151
+ self,
152
+ hidden_states,
153
+ attention_mask=None,
154
+ position_bias=None,
155
+ layer_head_mask=None,
156
+ output_attentions=False,
157
+ ):
158
+ """
159
+ Self-attention block
160
+ """
161
+ # Input is (batch_size, seq_length, dim)
162
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
163
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
164
+ batch_size, seq_length = hidden_states.shape[:2]
165
+
166
+ def to_projection_shape(states):
167
+ """projection"""
168
+ return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
169
+
170
+ # get query states
171
+ # (batch_size, n_heads, seq_length, dim_per_head)
172
+ query_states = to_projection_shape(self.query(hidden_states))
173
+
174
+ # get key/value states
175
+ key_states = to_projection_shape(self.key(hidden_states))
176
+ value_states = to_projection_shape(self.value(hidden_states))
177
+
178
+ # compute scores
179
+ # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
180
+ scores = torch.matmul(query_states, key_states.transpose(3, 2))
181
+
182
+ if position_bias is None:
183
+ position_bias = torch.zeros(
184
+ (1, self.n_heads, seq_length, seq_length), device=scores.device, dtype=scores.dtype
185
+ )
186
+ if self.gradient_checkpointing and self.training:
187
+ position_bias.requires_grad = True
188
+
189
+ if attention_mask is None:
190
+ attention_mask = torch.ones((batch_size, seq_length), device=scores.device, dtype=scores.dtype)
191
+
192
+ if attention_mask.dim() == 2:
193
+ position_bias = position_bias + attention_mask[:, None, None, :].to(position_bias.device)
194
+ else:
195
+ # (batch_size, n_heads, seq_length, key_length)
196
+ position_bias = position_bias + attention_mask.to(position_bias.device)
197
+ position_bias = 1 - position_bias
198
+
199
+ position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(scores.dtype).min)
200
+ scores += position_bias_masked
201
+ scores = torch.max(scores, torch.tensor(torch.finfo(scores.dtype).min))
202
+
203
+ # (batch_size, n_heads, seq_length, key_length)
204
+ attn_weights = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).type_as(scores)
205
+
206
+ # (batch_size, n_heads, seq_length, key_length)
207
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
208
+
209
+ # Mask heads if we want to
210
+ if layer_head_mask is not None:
211
+ attn_weights = attn_weights * layer_head_mask
212
+
213
+ attn_output = torch.matmul(attn_weights, value_states)
214
+
215
+ # (batch_size, seq_length, dim)
216
+ attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
217
+
218
+ attn_output = self.output(attn_output)
219
+
220
+ outputs = (attn_output,) + (position_bias,)
221
+
222
+ if output_attentions:
223
+ outputs = outputs + (attn_weights,)
224
+ return outputs
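
Note how the 0/1 padding mask is folded into `position_bias` and then flipped, so that real positions add 0 to the attention scores while padded positions add the most negative representable value before the softmax. A small sketch of just that masking step:

# Sketch of the additive-mask construction used above (real patches -> 0, padding -> large negative).
import torch

attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0]])  # (batch_size, key_length)
position_bias = torch.zeros(1, 1, 4, 4)                # (batch_size, n_heads, seq_length, key_length)

position_bias = position_bias + attention_mask[:, None, None, :]
position_bias = 1 - position_bias
position_bias_masked = position_bias.masked_fill(position_bias == 1, torch.finfo(torch.float32).min)
print(position_bias_masked[0, 0, 0])  # tensor([0., 0., 0., -3.4028e+38])
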
225
+
226
+
227
+ # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5DenseGatedActDense->Pix2StructVisionMlp,T5Config->Pix2StructVisionConfig,config.d_model->config.hidden_size,dropout_rate->dropout_rate
228
+ class Pix2StructVisionMlp(nn.Module):
229
+ def __init__(self, config: Pix2StructVisionConfig):
230
+ super().__init__()
231
+ self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
232
+ self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
233
+ self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
234
+ self.dropout = nn.Dropout(config.dropout_rate)
235
+ self.act = ACT2FN[config.dense_act_fn]
236
+
237
+ def forward(self, hidden_states):
238
+ hidden_gelu = self.act(self.wi_0(hidden_states))
239
+ hidden_linear = self.wi_1(hidden_states)
240
+ hidden_states = hidden_gelu * hidden_linear
241
+ hidden_states = self.dropout(hidden_states)
242
+
243
+ # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
244
+ # See https://github.com/huggingface/transformers/issues/20287
245
+ # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`
246
+ if (
247
+ isinstance(self.wo.weight, torch.Tensor)
248
+ and hidden_states.dtype != self.wo.weight.dtype
249
+ and self.wo.weight.dtype != torch.int8
250
+ ):
251
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
252
+
253
+ hidden_states = self.wo(hidden_states)
254
+ return hidden_states
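
The MLP is a gated feed-forward: one projection is passed through the activation and element-wise gates a second linear projection before the output projection. A standalone sketch with illustrative sizes (the checkpoint's `dense_act_fn` is typically a GELU variant; a tanh-approximate GELU stands in here):

# Standalone sketch of the gated feed-forward (GEGLU-style) computation above.
import torch

hidden_size, d_ff = 8, 32
wi_0 = torch.nn.Linear(hidden_size, d_ff, bias=False)  # gate branch (goes through the activation)
wi_1 = torch.nn.Linear(hidden_size, d_ff, bias=False)  # linear branch
wo = torch.nn.Linear(d_ff, hidden_size, bias=False)

x = torch.randn(2, 5, hidden_size)
hidden = torch.nn.functional.gelu(wi_0(x), approximate="tanh") * wi_1(x)
print(wo(hidden).shape)  # torch.Size([2, 5, 8])
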
255
+
256
+
257
+ class Pix2StructVisionLayer(nn.Module):
258
+ def __init__(self, config: Pix2StructConfig) -> None:
259
+ super().__init__()
260
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
261
+ self.seq_len_dim = 1
262
+ self.attention = Pix2StructVisionAttention(config)
263
+ self.mlp = Pix2StructVisionMlp(config)
264
+ self.pre_mlp_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
265
+ self.pre_attention_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
266
+
267
+ def forward(
268
+ self,
269
+ hidden_states: torch.Tensor,
270
+ attention_mask: Optional[torch.Tensor] = None,
271
+ head_mask: Optional[torch.Tensor] = None,
272
+ output_attentions: bool = False,
273
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
274
+ residual = hidden_states
275
+
276
+ # in Pix2StructVision, layernorm is applied before self-attention
277
+ hidden_states = self.pre_attention_layer_norm(hidden_states)
278
+
279
+ self_attention_outputs = self.attention(
280
+ hidden_states,
281
+ attention_mask=attention_mask,
282
+ layer_head_mask=head_mask,
283
+ output_attentions=output_attentions,
284
+ )
285
+ attention_output = self_attention_outputs[0]
286
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
287
+
288
+ # first residual connection
289
+ hidden_states = attention_output + residual
290
+
291
+ # in Pix2StructVision, layernorm is also applied after self-attention
292
+ layer_output = self.pre_mlp_layer_norm(hidden_states)
293
+ layer_output = self.mlp(layer_output) + hidden_states # second residual connection
294
+
295
+ outputs = (layer_output,) + outputs
296
+
297
+ return outputs
298
+
299
+
300
+ class Pix2StructVisionEncoder(nn.Module):
301
+ def __init__(self, config: Pix2StructConfig) -> None:
302
+ super().__init__()
303
+ self.config = config
304
+ self.layer = nn.ModuleList([Pix2StructVisionLayer(config) for _ in range(config.num_hidden_layers)])
305
+ self.gradient_checkpointing = False
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.Tensor,
310
+ attention_mask: Optional[torch.Tensor] = None,
311
+ head_mask: Optional[torch.Tensor] = None,
312
+ output_attentions: bool = False,
313
+ output_hidden_states: bool = False,
314
+ return_dict: bool = True,
315
+ ) -> Union[tuple, BaseModelOutput]:
316
+ all_hidden_states = () if output_hidden_states else None
317
+ all_self_attentions = () if output_attentions else None
318
+
319
+ for i, layer_module in enumerate(self.layer):
320
+ if output_hidden_states:
321
+ all_hidden_states = all_hidden_states + (hidden_states,)
322
+
323
+ layer_head_mask = head_mask[i] if head_mask is not None else None
324
+
325
+ if self.gradient_checkpointing and self.training:
326
+ layer_outputs = self._gradient_checkpointing_func(
327
+ layer_module.__call__,
328
+ hidden_states,
329
+ attention_mask,
330
+ layer_head_mask,
331
+ output_attentions,
332
+ )
333
+ else:
334
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
335
+
336
+ hidden_states = layer_outputs[0]
337
+
338
+ if output_attentions:
339
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
340
+
341
+ if output_hidden_states:
342
+ all_hidden_states = all_hidden_states + (hidden_states,)
343
+
344
+ if not return_dict:
345
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
346
+ return BaseModelOutput(
347
+ last_hidden_state=hidden_states,
348
+ hidden_states=all_hidden_states,
349
+ attentions=all_self_attentions,
350
+ )
351
+
352
+
353
+ class Pix2StructPreTrainedModel(PreTrainedModel):
354
+ """
355
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
356
+ models.
357
+ """
358
+
359
+ config_class = Pix2StructConfig
360
+
361
+ @property
362
+ def dummy_inputs(self):
363
+ input_ids = torch.tensor(DUMMY_INPUTS)
364
+ input_mask = torch.tensor(DUMMY_MASK)
365
+ dummy_inputs = {
366
+ "decoder_input_ids": input_ids,
367
+ "input_ids": input_ids,
368
+ "decoder_attention_mask": input_mask,
369
+ }
370
+ return dummy_inputs
371
+
372
+ def _init_weights(self, module):
373
+ """Initialize the weights"""
374
+ factor = self.config.initializer_factor # Used for testing weights initialization
375
+ if isinstance(module, Pix2StructLayerNorm):
376
+ module.weight.data.fill_(factor * 1.0)
377
+ elif isinstance(module, Pix2StructTextDenseGatedActDense):
378
+ hidden_size = (
379
+ self.config.text_config.hidden_size
380
+ if isinstance(self.config, Pix2StructConfig)
381
+ else self.config.hidden_size
382
+ )
383
+ d_ff = self.config.text_config.d_ff if isinstance(self.config, Pix2StructConfig) else self.config.d_ff
384
+
385
+ module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
386
+ if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None:
387
+ module.wi_0.bias.data.zero_()
388
+ module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
389
+ if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None:
390
+ module.wi_1.bias.data.zero_()
391
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((d_ff) ** -0.5))
392
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
393
+ module.wo.bias.data.zero_()
394
+ elif isinstance(module, Pix2StructTextAttention):
395
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
396
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
397
+ hidden_size = (
398
+ self.config.text_config.hidden_size
399
+ if isinstance(self.config, Pix2StructConfig)
400
+ else self.config.hidden_size
401
+ )
402
+ key_value_proj_dim = (
403
+ self.config.text_config.d_kv if isinstance(self.config, Pix2StructConfig) else self.config.hidden_size
404
+ )
405
+ n_heads = (
406
+ self.config.text_config.num_heads
407
+ if isinstance(self.config, Pix2StructConfig)
408
+ else self.config.num_heads
409
+ )
410
+
411
+ module.query.weight.data.normal_(mean=0.0, std=factor * ((hidden_size * key_value_proj_dim) ** -0.5))
412
+ module.key.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
413
+ module.value.weight.data.normal_(mean=0.0, std=factor * (hidden_size**-0.5))
414
+ module.output.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
415
+ if module.has_relative_attention_bias:
416
+ module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
417
+ elif isinstance(module, nn.Embedding):
418
+ hidden_size = (
419
+ self.config.text_config.hidden_size
420
+ if isinstance(self.config, Pix2StructConfig)
421
+ else self.config.hidden_size
422
+ )
423
+
424
+ module.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
425
+ if module.padding_idx is not None:
426
+ module.weight.data[module.padding_idx].zero_()
427
+ elif isinstance(module, Pix2StructTextModel):
428
+ hidden_size = (
429
+ self.config.text_config.hidden_size
430
+ if isinstance(self.config, Pix2StructConfig)
431
+ else self.config.hidden_size
432
+ )
433
+
434
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * ((hidden_size) ** -0.5))
435
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
436
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
437
+ # `trunc_normal_cpu` not implemented in `half` issues
438
+ module.weight.data = nn.init.trunc_normal_(
439
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
440
+ ).to(module.weight.dtype)
441
+ if module.bias is not None:
442
+ module.bias.data.zero_()
443
+ elif isinstance(module, Pix2StructLayerNorm):
444
+ if module.weight is not None:
445
+ module.weight.data.fill_(1.0)
446
+ elif isinstance(module, nn.Embedding):
447
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
448
+ if module.padding_idx is not None:
449
+ module.weight.data[module.padding_idx].zero_()
450
+
451
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->Pix2Struct
452
+ def _shift_right(self, input_ids):
453
+ decoder_start_token_id = self.config.decoder_start_token_id
454
+ pad_token_id = self.config.pad_token_id
455
+
456
+ if decoder_start_token_id is None:
457
+ raise ValueError(
458
+ "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. "
459
+ "See Pix2Struct docs for more information."
460
+ )
461
+
462
+ # shift inputs to the right
463
+ if is_torch_fx_proxy(input_ids):
464
+ # Item assignment is not supported natively for proxies.
465
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
466
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
467
+ else:
468
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
469
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
470
+ shifted_input_ids[..., 0] = decoder_start_token_id
471
+
472
+ if pad_token_id is None:
473
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
474
+ # replace possible -100 values in labels by `pad_token_id`
475
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
476
+
477
+ return shifted_input_ids
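
`_shift_right` derives decoder inputs from labels: prepend `decoder_start_token_id`, drop the last token, and replace any `-100` ignore-index entries with the pad token. A worked example with hypothetical token ids (in Pix2Struct the start token is usually the pad token, here both 0):

# Worked example of the _shift_right logic with hypothetical token ids.
import torch

labels = torch.tensor([[42, 17, -100, -100]])
decoder_start_token_id = 0
pad_token_id = 0

shifted_input_ids = labels.new_zeros(labels.shape)
shifted_input_ids[..., 1:] = labels[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
print(shifted_input_ids)  # tensor([[ 0, 42, 17,  0]])
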
478
+
479
+
480
+ PIX2STRUCT_VISION_START_DOCSTRING = r"""
481
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
482
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
483
+ behavior.
484
+
485
+ Parameters:
486
+ config ([`Pix2StructConfig`]): Model configuration class with all the parameters of the model.
487
+ Initializing with a config file does not load the weights associated with the model, only the
488
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
489
+ """
490
+
491
+ PIX2STRUCT_VISION_INPUTS_DOCSTRING = r"""
492
+ Args:
493
+ flattened_patches (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_channels x patch_height x patch_width)`):
494
+ Flattened and padded pixel values. These values can be obtained using [`AutoImageProcessor`]. See
495
+ [`Pix2StructVisionImageProcessor.__call__`] for details. Check the [original
496
+ paper](https://arxiv.org/abs/2210.03347) (figure 5) for more details.
497
+
498
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
499
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
500
+
501
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
502
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
503
+
504
+ - 1 indicates the head is **not masked**,
505
+ - 0 indicates the head is **masked**.
506
+
507
+ output_attentions (`bool`, *optional*):
508
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
509
+ tensors for more detail.
510
+ output_hidden_states (`bool`, *optional*):
511
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
512
+ more detail.
513
+ return_dict (`bool`, *optional*):
514
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
515
+ """
516
+
517
+
518
+ @add_start_docstrings(
519
+ "The bare Pix2StructVision Model transformer outputting raw hidden-states without any specific head on top.",
520
+ PIX2STRUCT_VISION_START_DOCSTRING,
521
+ )
522
+ class Pix2StructVisionModel(Pix2StructPreTrainedModel):
523
+ config_class = Pix2StructVisionConfig
524
+ main_input_name = "flattened_patches"
525
+ supports_gradient_checkpointing = True
526
+ _no_split_modules = ["Pix2StructVisionLayer"]
527
+
528
+ def __init__(self, config: Pix2StructConfig):
529
+ super().__init__(config)
530
+ self.config = config
531
+
532
+ self.embeddings = Pix2StructVisionEmbeddings(config)
533
+ self.encoder = Pix2StructVisionEncoder(config)
534
+
535
+ self.layernorm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
536
+
537
+ # Initialize weights and apply final processing
538
+ self.post_init()
539
+
540
+ def get_input_embeddings(self):
541
+ return self.embeddings.patch_projection
542
+
543
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
544
+ """
545
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
546
+ class PreTrainedModel
547
+ """
548
+ for layer, heads in heads_to_prune.items():
549
+ self.encoder.layer[layer].attention.prune_heads(heads)
550
+
551
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_VISION_INPUTS_DOCSTRING)
552
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
553
+ def forward(
554
+ self,
555
+ flattened_patches: Optional[torch.Tensor] = None,
556
+ attention_mask: Optional[torch.Tensor] = None,
557
+ head_mask: Optional[torch.Tensor] = None,
558
+ output_attentions: Optional[bool] = None,
559
+ output_hidden_states: Optional[bool] = None,
560
+ return_dict: Optional[bool] = None,
561
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
562
+ r"""
563
+ Returns:
564
+
565
+ Example:
566
+
567
+ ```python
568
+ >>> import requests
+ >>> import torch
569
+ >>> from PIL import Image
570
+ >>> from transformers import AutoProcessor, Pix2StructVisionModel
571
+
572
+ >>> image_processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
573
+ >>> model = Pix2StructVisionModel.from_pretrained("google/pix2struct-textcaps-base")
574
+
575
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
576
+ >>> image = Image.open(requests.get(url, stream=True).raw)
577
+
578
+ >>> inputs = image_processor(images=image, return_tensors="pt")
579
+ >>> with torch.no_grad():
580
+ ... outputs = model(**inputs)
581
+
582
+ >>> last_hidden_states = outputs.last_hidden_state
583
+ >>> list(last_hidden_states.shape)
584
+ [1, 2048, 768]
585
+ ```
586
+ """
587
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
588
+ output_hidden_states = (
589
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
590
+ )
591
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
592
+
593
+ if flattened_patches is None:
594
+ raise ValueError("You have to specify flattened_patches")
595
+
596
+ if attention_mask is None:
597
+ # check where `flattened_patches` is not 0
598
+ attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
599
+
600
+ # Prepare head mask if needed
601
+ # 1.0 in head_mask indicates we keep the head
602
+ # attention_probs has shape bsz x n_heads x N x N
603
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
604
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
605
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
606
+
607
+ embedding_output = self.embeddings(flattened_patches)
608
+
609
+ encoder_outputs = self.encoder(
610
+ embedding_output,
611
+ attention_mask=attention_mask,
612
+ head_mask=head_mask,
613
+ output_attentions=output_attentions,
614
+ output_hidden_states=output_hidden_states,
615
+ return_dict=return_dict,
616
+ )
617
+ sequence_output = encoder_outputs[0]
618
+ sequence_output = self.layernorm(sequence_output)
619
+
620
+ if not return_dict:
621
+ head_outputs = (sequence_output,)
622
+ return head_outputs + encoder_outputs[1:]
623
+
624
+ return BaseModelOutput(
625
+ last_hidden_state=sequence_output,
626
+ hidden_states=encoder_outputs.hidden_states,
627
+ attentions=encoder_outputs.attentions,
628
+ )
629
+
630
+
631
+ # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->Pix2StructText,d_model->hidden_size
632
+ class Pix2StructTextDenseGatedActDense(nn.Module):
633
+ def __init__(self, config: Pix2StructTextConfig):
634
+ super().__init__()
635
+ self.wi_0 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
636
+ self.wi_1 = nn.Linear(config.hidden_size, config.d_ff, bias=False)
637
+ self.wo = nn.Linear(config.d_ff, config.hidden_size, bias=False)
638
+ self.dropout = nn.Dropout(config.dropout_rate)
639
+ self.act = ACT2FN[config.dense_act_fn]
640
+
641
+ def forward(self, hidden_states):
642
+ hidden_gelu = self.act(self.wi_0(hidden_states))
643
+ hidden_linear = self.wi_1(hidden_states)
644
+ hidden_states = hidden_gelu * hidden_linear
645
+ hidden_states = self.dropout(hidden_states)
646
+
647
+ # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
648
+ # See https://github.com/huggingface/transformers/issues/20287
649
+ # we also make sure the weights are not in `int8` in case users force `_keep_in_fp32_modules` to be `None`
650
+ if (
651
+ isinstance(self.wo.weight, torch.Tensor)
652
+ and hidden_states.dtype != self.wo.weight.dtype
653
+ and self.wo.weight.dtype != torch.int8
654
+ ):
655
+ hidden_states = hidden_states.to(self.wo.weight.dtype)
656
+
657
+ hidden_states = self.wo(hidden_states)
658
+ return hidden_states
659
+
660
+
661
+ class Pix2StructTextLayerFF(nn.Module):
662
+ def __init__(self, config: Pix2StructTextConfig):
663
+ super().__init__()
664
+ self.DenseReluDense = Pix2StructTextDenseGatedActDense(config)
665
+
666
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
667
+ self.dropout = nn.Dropout(config.dropout_rate)
668
+
669
+ # Copied from transformers.models.t5.modeling_t5.T5LayerFF.forward
670
+ def forward(self, hidden_states):
671
+ forwarded_states = self.layer_norm(hidden_states)
672
+ forwarded_states = self.DenseReluDense(forwarded_states)
673
+ hidden_states = hidden_states + self.dropout(forwarded_states)
674
+ return hidden_states
675
+
676
+
677
+ class Pix2StructTextAttention(nn.Module):
678
+ def __init__(self, config: Pix2StructTextConfig, has_relative_attention_bias=False):
679
+ super().__init__()
680
+ self.has_relative_attention_bias = has_relative_attention_bias
681
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
682
+ self.relative_attention_max_distance = config.relative_attention_max_distance
683
+ self.hidden_size = config.hidden_size
684
+ self.key_value_proj_dim = config.d_kv
685
+ self.n_heads = config.num_heads
686
+ self.dropout = config.dropout_rate
687
+ self.inner_dim = self.n_heads * self.key_value_proj_dim
688
+
689
+ # Mesh TensorFlow initialization to avoid scaling before softmax
690
+ self.query = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
691
+ self.key = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
692
+ self.value = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
693
+ self.output = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
694
+
695
+ if self.has_relative_attention_bias:
696
+ self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
697
+ self.pruned_heads = set()
698
+ self.gradient_checkpointing = False
699
+
700
+ @staticmethod
701
+ # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket
702
+ def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
703
+ """
704
+ Adapted from Mesh Tensorflow:
705
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
706
+
707
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
708
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
709
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
710
+ small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
711
+ positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
712
+ This should allow for more graceful generalization to longer sequences than the model has been trained on
713
+
714
+ Args:
715
+ relative_position: an int32 Tensor
716
+ bidirectional: a boolean - whether the attention is bidirectional
717
+ num_buckets: an integer
718
+ max_distance: an integer
719
+
720
+ Returns:
721
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
722
+ """
723
+ relative_buckets = 0
724
+ if bidirectional:
725
+ num_buckets //= 2
726
+ relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
727
+ relative_position = torch.abs(relative_position)
728
+ else:
729
+ relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
730
+ # now relative_position is in the range [0, inf)
731
+
732
+ # half of the buckets are for exact increments in positions
733
+ max_exact = num_buckets // 2
734
+ is_small = relative_position < max_exact
735
+
736
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
737
+ relative_position_if_large = max_exact + (
738
+ torch.log(relative_position.float() / max_exact)
739
+ / math.log(max_distance / max_exact)
740
+ * (num_buckets - max_exact)
741
+ ).to(torch.long)
742
+ relative_position_if_large = torch.min(
743
+ relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
744
+ )
745
+
746
+ relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
747
+ return relative_buckets
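
The decoder calls this with `bidirectional=False`, so only non-positive relative positions (attending to the current or past tokens) are bucketed: exact buckets up to half of `num_buckets`, then logarithmically wider buckets out to `max_distance`. A quick sketch calling the static method directly (the import path assumes the module from this diff is installed; the bucket/distance values are illustrative):

# Sketch: relative-position buckets for a causal decoder setting (bidirectional=False).
import torch
from transformers.models.pix2struct.modeling_pix2struct import Pix2StructTextAttention

# memory_position - query_position for a token attending to itself and ten past tokens
relative_position = torch.arange(-10, 1).unsqueeze(0)
buckets = Pix2StructTextAttention._relative_position_bucket(
    relative_position, bidirectional=False, num_buckets=8, max_distance=16
)
print(buckets)  # small distances get exact buckets, larger ones share log-spaced buckets
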
748
+
749
+ # Adapted from transformers.models.t5.modeling_t5.T5Attention.compute_bias
750
+ def compute_bias(self, query_length, key_length, device=None):
751
+ """Compute binned relative position bias"""
752
+ if device is None:
753
+ device = self.relative_attention_bias.weight.device
754
+ context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
755
+ memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
756
+ relative_position = memory_position - context_position # shape (query_length, key_length)
757
+ relative_position_bucket = self._relative_position_bucket(
758
+ relative_position, # shape (query_length, key_length)
759
+ bidirectional=False,
760
+ num_buckets=self.relative_attention_num_buckets,
761
+ max_distance=self.relative_attention_max_distance,
762
+ )
763
+ values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
764
+ values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
765
+ return values
766
+
767
+ def forward(
768
+ self,
769
+ hidden_states,
770
+ mask=None,
771
+ key_value_states=None,
772
+ position_bias=None,
773
+ past_key_value=None,
774
+ layer_head_mask=None,
775
+ query_length=None,
776
+ use_cache=False,
777
+ output_attentions=False,
778
+ ):
779
+ """
780
+ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
781
+ """
782
+ # Input is (batch_size, seq_length, dim)
783
+ # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
784
+ # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
785
+ batch_size, seq_length = hidden_states.shape[:2]
786
+
787
+ real_seq_length = seq_length
788
+
789
+ if past_key_value is not None:
790
+ if len(past_key_value) != 2:
791
+ raise ValueError(
792
+ f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states"
793
+ )
794
+ real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
795
+
796
+ key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
797
+
798
+ def to_projection_shape(states):
799
+ """projection"""
800
+ return states.contiguous().view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
801
+
802
+ def project(hidden_states, proj_layer, key_value_states, past_key_value):
803
+ """projects hidden states correctly to key/query states"""
804
+ if key_value_states is None:
805
+ # self-attn
806
+ # (batch_size, n_heads, seq_length, dim_per_head)
807
+ hidden_states = to_projection_shape(proj_layer(hidden_states))
808
+ elif past_key_value is None:
809
+ # cross-attn
810
+ # (batch_size, n_heads, seq_length, dim_per_head)
811
+ hidden_states = to_projection_shape(proj_layer(key_value_states))
812
+
813
+ if past_key_value is not None:
814
+ if key_value_states is None:
815
+ # self-attn
816
+ # (batch_size, n_heads, key_length, dim_per_head)
817
+ hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
818
+ elif past_key_value.shape[2] != key_value_states.shape[1]:
819
+ # checking that the `sequence_length` of the `past_key_value` is the same as
820
+ # the provided `key_value_states` to support prefix tuning
821
+ # cross-attn
822
+ # (batch_size, n_heads, seq_length, dim_per_head)
823
+ hidden_states = to_projection_shape(proj_layer(key_value_states))
824
+ else:
825
+ # cross-attn
826
+ hidden_states = past_key_value
827
+ return hidden_states
828
+
829
+ # get query states
830
+ # (batch_size, n_heads, seq_length, dim_per_head)
831
+ query_states = to_projection_shape(self.query(hidden_states))
832
+
833
+ # get key/value states
834
+ key_states = project(
835
+ hidden_states, self.key, key_value_states, past_key_value[0] if past_key_value is not None else None
836
+ )
837
+ value_states = project(
838
+ hidden_states, self.value, key_value_states, past_key_value[1] if past_key_value is not None else None
839
+ )
840
+
841
+ # compute scores
842
+ scores = torch.matmul(
843
+ query_states, key_states.transpose(3, 2)
844
+ ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
845
+
846
+ if position_bias is None:
847
+ if not self.has_relative_attention_bias:
848
+ position_bias = torch.zeros(
849
+ (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
850
+ )
851
+ if self.gradient_checkpointing and self.training:
852
+ position_bias.requires_grad = True
853
+ else:
854
+ position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)
855
+
856
+ # if key and values are already calculated
857
+ # we want only the last query position bias
858
+ if past_key_value is not None:
859
+ position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
860
+
861
+ if mask is not None:
862
+ position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
863
+
864
+ if self.pruned_heads:
865
+ mask = torch.ones(position_bias.shape[1])
866
+ mask[list(self.pruned_heads)] = 0
867
+ position_bias_masked = position_bias[:, mask.bool()]
868
+ else:
869
+ position_bias_masked = position_bias
870
+
871
+ scores += position_bias_masked
872
+ # (batch_size, n_heads, seq_length, key_length)
873
+ attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
874
+
875
+ # (batch_size, n_heads, seq_length, key_length)
876
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
877
+
878
+ # Mask heads if we want to
879
+ if layer_head_mask is not None:
880
+ attn_weights = attn_weights * layer_head_mask
881
+
882
+ attn_output = torch.matmul(attn_weights, value_states)
883
+ # (batch_size, seq_length, dim)
884
+ attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
885
+
886
+ attn_output = self.output(attn_output)
887
+
888
+ present_key_value_state = (key_states, value_states) if use_cache else None
889
+ outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
890
+
891
+ if output_attentions:
892
+ outputs = outputs + (attn_weights,)
893
+ return outputs
894
+
895
+
896
+ # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.SelfAttention->self.attention,config.d_model->config.hidden_size
897
+ class Pix2StructTextLayerSelfAttention(nn.Module):
898
+ def __init__(self, config, has_relative_attention_bias=False):
899
+ super().__init__()
900
+ self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=has_relative_attention_bias)
901
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
902
+ self.dropout = nn.Dropout(config.dropout_rate)
903
+
904
+ def forward(
905
+ self,
906
+ hidden_states,
907
+ attention_mask=None,
908
+ position_bias=None,
909
+ layer_head_mask=None,
910
+ past_key_value=None,
911
+ use_cache=False,
912
+ output_attentions=False,
913
+ ):
914
+ normed_hidden_states = self.layer_norm(hidden_states)
915
+ attention_output = self.attention(
916
+ normed_hidden_states,
917
+ mask=attention_mask,
918
+ position_bias=position_bias,
919
+ layer_head_mask=layer_head_mask,
920
+ past_key_value=past_key_value,
921
+ use_cache=use_cache,
922
+ output_attentions=output_attentions,
923
+ )
924
+ hidden_states = hidden_states + self.dropout(attention_output[0])
925
+ outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
926
+ return outputs
927
+
928
+
929
+ # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5LayerNorm->Pix2StructLayerNorm,T5Attention->Pix2StructTextAttention,self.EncDecAttention->self.attention,config.d_model->config.hidden_size
930
+ class Pix2StructTextLayerCrossAttention(nn.Module):
931
+ def __init__(self, config):
932
+ super().__init__()
933
+ self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False)
934
+ self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
935
+ self.dropout = nn.Dropout(config.dropout_rate)
936
+
937
+ def forward(
938
+ self,
939
+ hidden_states,
940
+ key_value_states,
941
+ attention_mask=None,
942
+ position_bias=None,
943
+ layer_head_mask=None,
944
+ past_key_value=None,
945
+ use_cache=False,
946
+ query_length=None,
947
+ output_attentions=False,
948
+ ):
949
+ normed_hidden_states = self.layer_norm(hidden_states)
950
+ attention_output = self.attention(
951
+ normed_hidden_states,
952
+ mask=attention_mask,
953
+ key_value_states=key_value_states,
954
+ position_bias=position_bias,
955
+ layer_head_mask=layer_head_mask,
956
+ past_key_value=past_key_value,
957
+ use_cache=use_cache,
958
+ query_length=query_length,
959
+ output_attentions=output_attentions,
960
+ )
961
+ layer_output = hidden_states + self.dropout(attention_output[0])
962
+ outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
963
+ return outputs
964
+
965
+
966
+ class Pix2StructTextBlock(nn.Module):
967
+ def __init__(self, config, has_relative_attention_bias=False):
968
+ super().__init__()
969
+
970
+ self.self_attention = Pix2StructTextLayerSelfAttention(
971
+ config, has_relative_attention_bias=has_relative_attention_bias
972
+ )
973
+
974
+ self.encoder_decoder_attention = Pix2StructTextLayerCrossAttention(config)
975
+
976
+ self.mlp = Pix2StructTextLayerFF(config)
977
+
978
+ def forward(
979
+ self,
980
+ hidden_states,
981
+ attention_mask=None,
982
+ position_bias=None,
983
+ encoder_hidden_states=None,
984
+ encoder_attention_mask=None,
985
+ encoder_decoder_position_bias=None,
986
+ layer_head_mask=None,
987
+ cross_attn_layer_head_mask=None,
988
+ past_key_value=None,
989
+ use_cache=False,
990
+ output_attentions=False,
991
+ return_dict=True,
992
+ ):
993
+ if past_key_value is not None:
994
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
995
+
996
+ if len(past_key_value) != expected_num_past_key_values:
997
+ raise ValueError(
998
+ f"There should be {expected_num_past_key_values} past states. "
999
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
1000
+ f"Got {len(past_key_value)} past key / value states"
1001
+ )
1002
+
1003
+ self_attn_past_key_value = past_key_value[:2]
1004
+ cross_attn_past_key_value = past_key_value[2:]
1005
+ else:
1006
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
1007
+
1008
+ self_attention_outputs = self.self_attention(
1009
+ hidden_states,
1010
+ attention_mask=attention_mask,
1011
+ position_bias=position_bias,
1012
+ layer_head_mask=layer_head_mask,
1013
+ past_key_value=self_attn_past_key_value,
1014
+ use_cache=use_cache,
1015
+ output_attentions=output_attentions,
1016
+ )
1017
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
1018
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
1019
+
1020
+ # clamp inf values to enable fp16 training
1021
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1022
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1023
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1024
+
1025
+ do_cross_attention = encoder_hidden_states is not None
1026
+ if do_cross_attention:
1027
+ # the actual query length is unknown for cross attention
1028
+ # if using past key value states. Need to inject it here
1029
+ if present_key_value_state is not None:
1030
+ query_length = present_key_value_state[0].shape[2]
1031
+ else:
1032
+ query_length = None
1033
+
1034
+ cross_attention_outputs = self.encoder_decoder_attention(
1035
+ hidden_states,
1036
+ key_value_states=encoder_hidden_states,
1037
+ attention_mask=encoder_attention_mask,
1038
+ position_bias=encoder_decoder_position_bias,
1039
+ layer_head_mask=cross_attn_layer_head_mask,
1040
+ past_key_value=cross_attn_past_key_value,
1041
+ query_length=query_length,
1042
+ use_cache=use_cache,
1043
+ output_attentions=output_attentions,
1044
+ )
1045
+ hidden_states = cross_attention_outputs[0]
1046
+
1047
+ # clamp inf values to enable fp16 training
1048
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1049
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1050
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1051
+
1052
+ # Combine self attn and cross attn key value states
1053
+ if present_key_value_state is not None:
1054
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
1055
+
1056
+ # Keep cross-attention outputs and relative position weights
1057
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
1058
+
1059
+ # Apply Feed Forward layer
1060
+ hidden_states = self.mlp(hidden_states)
1061
+
1062
+ # clamp inf values to enable fp16 training
1063
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
1064
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
1065
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
1066
+
1067
+ outputs = (hidden_states,)
1068
+
1069
+ if use_cache:
1070
+ outputs = outputs + (present_key_value_state,) + attention_outputs
1071
+ else:
1072
+ outputs = outputs + attention_outputs
1073
+
1074
+ return outputs
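
When the block runs with cached states, `past_key_value` must hold two tensors for the self-attention and, when cross-attention is active, two more for the encoder-decoder attention, in that order. A small sketch of that layout (shapes are illustrative):

# Sketch of the past_key_value layout Pix2StructTextBlock expects during cached decoding.
import torch

batch_size, num_heads, past_length, head_dim = 1, 2, 3, 4
past_key_value = tuple(
    torch.zeros(batch_size, num_heads, past_length, head_dim) for _ in range(4)
)

self_attn_past_key_value = past_key_value[:2]   # (key, value) for self-attention
cross_attn_past_key_value = past_key_value[2:]  # (key, value) for cross-attention
print(len(self_attn_past_key_value), len(cross_attn_past_key_value))  # 2 2
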
1075
+
1076
+
1077
+ PIX2STRUCT_START_DOCSTRING = r"""
1078
+
1079
+ The Pix2Struct model was proposed in [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language
1080
+ Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu,
1081
+ Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. It's an encoder-decoder
1082
+ transformer pre-trained in an image-to-text setting.
1083
+
1084
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1085
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1086
+ etc.)
1087
+
1088
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1089
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
1090
+ and behavior.
1091
+
1092
+ Parameters:
1093
+ config (Union[`Pix2StructConfig`, `Pix2StructTextConfig`]):
1094
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1095
+ load the weights associated with the model, only the configuration. Check out the
1096
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1097
+ """
1098
+
1099
+ PIX2STRUCT_TEXT_INPUTS_DOCSTRING = r"""
1100
+ Args:
1101
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1102
+ Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position
1103
+ embeddings so you should be able to pad the inputs on both the right and the left.
1104
+
1105
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1106
+ [`PreTrainedTokenizer.__call__`] for detail.
1107
+
1108
+ [What are input IDs?](../glossary#input-ids)
1109
+
1110
+ To know more on how to prepare `input_ids` for pretraining, take a look at [Pix2StructText
1111
+ Training](./t5#training).
1112
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1113
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1114
+
1115
+ - 1 for tokens that are **not masked**,
1116
+ - 0 for tokens that are **masked**.
1117
+
1118
+ [What are attention masks?](../glossary#attention-mask)
1119
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1120
+ Indices of decoder input sequence tokens in the vocabulary.
1121
+
1122
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1123
+ [`PreTrainedTokenizer.__call__`] for details.
1124
+
1125
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1126
+
1127
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1128
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1129
+ `past_key_values`).
1130
+
1131
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
1132
+ Training](./t5#training).
1133
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1134
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1135
+ be used by default.
1136
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1137
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1138
+ 1]`:
1139
+
1140
+ - 1 indicates the head is **not masked**,
1141
+ - 0 indicates the head is **masked**.
1142
+
1143
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1144
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1145
+ 1]`:
1146
+
1147
+ - 1 indicates the head is **not masked**,
1148
+ - 0 indicates the head is **masked**.
1149
+
1150
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1151
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1152
+ `[0, 1]`:
1153
+
1154
+ - 1 indicates the head is **not masked**,
1155
+ - 0 indicates the head is **masked**.
1156
+
1157
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
1158
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1159
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1160
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1161
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1162
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1163
+
1164
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1165
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1166
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1167
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1168
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1169
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1170
+ model's internal embedding lookup matrix.
1171
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1172
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1173
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1174
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1175
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1176
+
1177
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1178
+ of `inputs_embeds`.
1179
+
1180
+ use_cache (`bool`, *optional*):
1181
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1182
+ `past_key_values`).
1183
+
1184
+ output_attentions (`bool`, *optional*):
1185
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1186
+ tensors for more detail.
1187
+ output_hidden_states (`bool`, *optional*):
1188
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1189
+ more detail.
1190
+ return_dict (`bool`, *optional*):
1191
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1192
+ """
1193
+
1194
+ PIX2STRUCT_INPUTS_DOCSTRING = r"""
1195
+ Args:
1196
+ flattened_patches (`torch.FloatTensor` of shape `(batch_size, seq_length, hidden_size)`):
1197
+ Flattened pixel patches. The `hidden_size` is obtained by the following formula: `hidden_size` =
1198
+ `num_channels` * `patch_size` * `patch_size`
1199
+
1200
+ The process of flattening the pixel patches is done by `Pix2StructProcessor`.
1201
+
1202
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1203
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1204
+
1205
+ - 1 for tokens that are **not masked**,
1206
+ - 0 for tokens that are **masked**.
1207
+
1208
+ [What are attention masks?](../glossary#attention-mask)
1209
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1210
+ Indices of decoder input sequence tokens in the vocabulary.
1211
+
1212
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1213
+ [`PreTrainedTokenizer.__call__`] for details.
1214
+
1215
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
1216
+
1217
+ Pix2StructText uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
1218
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1219
+ `past_key_values`).
1220
+
1221
+ To know more on how to prepare `decoder_input_ids` for pretraining take a look at [Pix2StructText
1222
+ Training](./t5#training).
1223
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
1224
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
1225
+ be used by default.
1226
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1227
+ Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,
1228
+ 1]`:
1229
+
1230
+ - 1 indicates the head is **not masked**,
1231
+ - 0 indicates the head is **masked**.
1232
+
1233
+ decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1234
+ Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,
1235
+ 1]`:
1236
+
1237
+ - 1 indicates the head is **not masked**,
1238
+ - 0 indicates the head is **masked**.
1239
+
1240
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1241
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
1242
+ `[0, 1]`:
1243
+
1244
+ - 1 indicates the head is **not masked**,
1245
+ - 0 indicates the head is **masked**.
1246
+
1247
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
1248
+ Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)
1249
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at
1250
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
1251
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1252
+ Contains precomputed key and value hidden states of the attention layers. Can be used to speed up decoding.
1253
+
1254
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1255
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1256
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1257
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
1258
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
1259
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
1260
+ input (see `past_key_values`). This is useful if you want more control over how to convert
1261
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
1262
+
1263
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
1264
+ of `inputs_embeds`.
1265
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1266
+ Labels for computing the masked language modeling loss for the decoder.
1267
+
1268
+ use_cache (`bool`, *optional*):
1269
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1270
+ `past_key_values`).
1271
+
1272
+ output_attentions (`bool`, *optional*):
1273
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1274
+ tensors for more detail.
1275
+ output_hidden_states (`bool`, *optional*):
1276
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1277
+ more detail.
1278
+ return_dict (`bool`, *optional*):
1279
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1280
+ """
1281
+
1282
+
1283
+ @add_start_docstrings(
1284
+ "The standalone text decoder of Pix2Struct",
1285
+ PIX2STRUCT_START_DOCSTRING,
1286
+ )
1287
+ class Pix2StructTextModel(Pix2StructPreTrainedModel):
1288
+ config_class = Pix2StructTextConfig
1289
+ _no_split_modules = ["Pix2StructTextBlock"]
1290
+ _tied_weights_keys = ["lm_head.weight"]
1291
+ supports_gradient_checkpointing = True
1292
+
1293
+ def __init__(self, config):
1294
+ super().__init__(config)
1295
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
1296
+
1297
+ self.layer = nn.ModuleList(
1298
+ [Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
1299
+ )
1300
+ self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
1301
+ self.dropout = nn.Dropout(config.dropout_rate)
1302
+
1303
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1304
+
1305
+ # Initialize weights and apply final processing
1306
+ self.post_init()
1307
+ self.gradient_checkpointing = False
1308
+
1309
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._reorder_cache
1310
+ def _reorder_cache(self, past_key_values, beam_idx):
1311
+ # if decoder past is not included in output
1312
+ # speedy decoding is disabled and no need to reorder
1313
+ if past_key_values is None:
1314
+ logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
1315
+ return past_key_values
1316
+
1317
+ reordered_decoder_past = ()
1318
+ for layer_past_states in past_key_values:
1319
+ # get the correct batch idx from layer past batch dim
1320
+ # batch dim of `past` is at 2nd position
1321
+ reordered_layer_past_states = ()
1322
+ for layer_past_state in layer_past_states:
1323
+ # need to set correct `past` for each of the four key / value states
1324
+ reordered_layer_past_states = reordered_layer_past_states + (
1325
+ layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
1326
+ )
1327
+
1328
+ if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
1329
+ raise ValueError(
1330
+ f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
1331
+ )
1332
+ if len(reordered_layer_past_states) != len(layer_past_states):
1333
+ raise ValueError(
1334
+ f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
1335
+ )
1336
+
1337
+ reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
1338
+ return reordered_decoder_past
1339
+
1340
+ def get_input_embeddings(self):
1341
+ return self.embed_tokens
1342
+
1343
+ def set_input_embeddings(self, new_embeddings):
1344
+ self.embed_tokens = new_embeddings
1345
+
1346
+ def get_output_embeddings(self):
1347
+ return self.lm_head
1348
+
1349
+ def set_output_embeddings(self, new_embeddings):
1350
+ self.lm_head = new_embeddings
1351
+
1352
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_TEXT_INPUTS_DOCSTRING)
1353
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1354
+ def forward(
1355
+ self,
1356
+ input_ids: Optional[torch.LongTensor] = None,
1357
+ attention_mask: Optional[torch.FloatTensor] = None,
1358
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1359
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1360
+ inputs_embeds: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1363
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1364
+ use_cache: Optional[bool] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ labels: Optional[torch.LongTensor] = None,
1368
+ return_dict: Optional[bool] = None,
1369
+ **kwargs,
1370
+ ) -> Union[Tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]:
1371
+ r"""
1372
+ Returns:
1373
+
1374
+ Example:
1375
+
1376
+ ```python
1377
+ >>> from transformers import AutoProcessor, Pix2StructTextModel
1378
+
1379
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
1380
+ >>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base")
1381
+
1382
+ >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
1383
+ >>> outputs = model(**inputs)
1384
+ >>> loss = outputs.loss
1385
+ ```
1386
+ """
1387
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1388
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1389
+ output_hidden_states = (
1390
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1391
+ )
1392
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1393
+
1394
+ if input_ids is not None and inputs_embeds is not None:
1395
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1396
+ elif input_ids is not None:
1397
+ input_shape = input_ids.size()
1398
+ input_ids = input_ids.view(-1, input_shape[-1])
1399
+ elif inputs_embeds is not None:
1400
+ input_shape = inputs_embeds.size()[:-1]
1401
+ else:
1402
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1403
+
1404
+ if inputs_embeds is None:
1405
+ assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
1406
+ inputs_embeds = self.embed_tokens(input_ids)
1407
+
1408
+ batch_size, seq_length = input_shape
1409
+
1410
+ # required mask seq length can be calculated via length of past
1411
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
1412
+
1413
+ if attention_mask is None:
1414
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
1415
+ if encoder_attention_mask is None and encoder_hidden_states is not None:
1416
+ encoder_seq_length = encoder_hidden_states.shape[1]
1417
+ encoder_attention_mask = torch.ones(
1418
+ batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
1419
+ )
1420
+
1421
+ # initialize past_key_values with `None` if past does not exist
1422
+ if past_key_values is None:
1423
+ past_key_values = [None] * len(self.layer)
1424
+
1425
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1426
+ # ourselves in which case we just need to make it broadcastable to all heads.
1427
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
1428
+
1429
+ # If a 2D or 3D attention mask is provided for the cross-attention
1430
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1431
+ if encoder_hidden_states is not None:
1432
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1433
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1434
+ if encoder_attention_mask is None:
1435
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
1436
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1437
+ else:
1438
+ encoder_extended_attention_mask = None
1439
+
1440
+ # Prepare head mask if needed
1441
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
1442
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
1443
+ present_key_value_states = () if use_cache else None
1444
+ all_hidden_states = () if output_hidden_states else None
1445
+ all_attentions = () if output_attentions else None
1446
+ all_cross_attentions = () if (output_attentions) else None
1447
+ position_bias = None
1448
+ encoder_decoder_position_bias = None
1449
+
1450
+ hidden_states = self.dropout(inputs_embeds)
1451
+
1452
+ for i, (layer_module, past_key_value) in enumerate(zip(self.layer, past_key_values)):
1453
+ layer_head_mask = head_mask[i]
1454
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
1455
+ if output_hidden_states:
1456
+ all_hidden_states = all_hidden_states + (hidden_states,)
1457
+
1458
+ if self.gradient_checkpointing and self.training:
1459
+ if use_cache:
1460
+ logger.warning(
1461
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1462
+ )
1463
+ use_cache = False
1464
+ layer_outputs = self._gradient_checkpointing_func(
1465
+ layer_module.forward,
1466
+ hidden_states,
1467
+ extended_attention_mask,
1468
+ position_bias,
1469
+ encoder_hidden_states,
1470
+ encoder_extended_attention_mask,
1471
+ encoder_decoder_position_bias,
1472
+ layer_head_mask,
1473
+ cross_attn_layer_head_mask,
1474
+ None, # past_key_value is always None with gradient checkpointing
1475
+ use_cache,
1476
+ output_attentions,
1477
+ )
1478
+ else:
1479
+ layer_outputs = layer_module(
1480
+ hidden_states,
1481
+ attention_mask=extended_attention_mask,
1482
+ position_bias=position_bias,
1483
+ encoder_hidden_states=encoder_hidden_states,
1484
+ encoder_attention_mask=encoder_extended_attention_mask,
1485
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
1486
+ layer_head_mask=layer_head_mask,
1487
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
1488
+ past_key_value=past_key_value,
1489
+ use_cache=use_cache,
1490
+ output_attentions=output_attentions,
1491
+ )
1492
+
1493
+ # layer_outputs is a tuple with:
1494
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
1495
+ if use_cache is False:
1496
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
1497
+
1498
+ hidden_states, present_key_value_state = layer_outputs[:2]
1499
+
1500
+ # We share the position biases between the layers - the first layer store them
1501
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
1502
+ # (cross-attention position bias), (cross-attention weights)
1503
+ position_bias = layer_outputs[2]
1504
+ if encoder_hidden_states is not None:
1505
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
1506
+ # append next layer key value states
1507
+ if use_cache:
1508
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
1509
+
1510
+ if output_attentions:
1511
+ all_attentions = all_attentions + (layer_outputs[3],)
1512
+ if encoder_hidden_states is not None:
1513
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
1514
+
1515
+ hidden_states = self.final_layer_norm(hidden_states)
1516
+ hidden_states = self.dropout(hidden_states)
1517
+
1518
+ logits = self.lm_head(hidden_states)
1519
+
1520
+ # Add last layer
1521
+ if output_hidden_states:
1522
+ all_hidden_states = all_hidden_states + (hidden_states,)
1523
+
1524
+ loss = None
1525
+ if labels is not None:
1526
+ # move labels to correct device to enable model parallelism
1527
+ labels = labels.to(logits.device)
1528
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean")
1529
+
1530
+ loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1))
1531
+
1532
+ if not return_dict:
1533
+ return tuple(
1534
+ v
1535
+ for v in [
1536
+ loss,
1537
+ logits,
1538
+ present_key_value_states,
1539
+ all_hidden_states,
1540
+ all_attentions,
1541
+ all_cross_attentions,
1542
+ ]
1543
+ if v is not None
1544
+ )
1545
+ return CausalLMOutputWithCrossAttentions(
1546
+ loss=loss,
1547
+ logits=logits,
1548
+ past_key_values=present_key_value_states,
1549
+ hidden_states=all_hidden_states,
1550
+ attentions=all_attentions,
1551
+ cross_attentions=all_cross_attentions,
1552
+ )
1553
+
1554
+
1555
+ @add_start_docstrings(
1556
+ "A conditional generation model with a language modeling head. Can be used for sequence generation tasks.",
1557
+ PIX2STRUCT_START_DOCSTRING,
1558
+ )
1559
+ class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel):
1560
+ config_class = Pix2StructConfig
1561
+ main_input_name = "flattened_patches"
1562
+ _tied_weights_keys = ["decoder.lm_head.weight"]
1563
+
1564
+ def __init__(self, config: Pix2StructConfig):
1565
+ super().__init__(config)
1566
+
1567
+ self.encoder = Pix2StructVisionModel(config.vision_config)
1568
+ self.decoder = Pix2StructTextModel(config.text_config)
1569
+
1570
+ self.is_vqa = config.is_vqa
1571
+
1572
+ # Initialize weights and apply final processing
1573
+ self.post_init()
1574
+
1575
+ def get_input_embeddings(self):
1576
+ return self.decoder.get_input_embeddings()
1577
+
1578
+ def set_input_embeddings(self, new_embeddings):
1579
+ self.decoder.set_input_embeddings(new_embeddings)
1580
+
1581
+ def get_output_embeddings(self) -> nn.Module:
1582
+ return self.decoder.get_output_embeddings()
1583
+
1584
+ def set_output_embeddings(self, new_embeddings):
1585
+ self.decoder.set_output_embeddings(new_embeddings)
1586
+
1587
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1588
+ model_embeds = self.decoder.resize_token_embeddings(new_num_tokens)
1589
+
1590
+ # update vocab size
1591
+ self.config.text_config.vocab_size = new_num_tokens
1592
+
1593
+ return model_embeds
1594
+
1595
+ def get_decoder(self):
1596
+ return self.decoder
1597
+
1598
+ def get_encoder(self):
1599
+ return self.encoder
1600
+
1601
+ @add_start_docstrings_to_model_forward(PIX2STRUCT_INPUTS_DOCSTRING)
1602
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
1603
+ def forward(
1604
+ self,
1605
+ flattened_patches: Optional[torch.FloatTensor] = None,
1606
+ attention_mask: Optional[torch.FloatTensor] = None,
1607
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1608
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1609
+ head_mask: Optional[torch.FloatTensor] = None,
1610
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
1611
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1612
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1613
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1614
+ labels: Optional[torch.LongTensor] = None,
1615
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1616
+ use_cache: Optional[bool] = None,
1617
+ output_attentions: Optional[bool] = None,
1618
+ output_hidden_states: Optional[bool] = None,
1619
+ return_dict: Optional[bool] = None,
1620
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
1621
+ r"""
1622
+ Returns:
1623
+
1624
+ Example:
1625
+
1626
+ Inference:
1627
+
1628
+ ```python
1629
+ >>> from PIL import Image
1630
+ >>> import requests
1631
+ >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
1632
+
1633
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
1634
+ >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
1635
+
1636
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1637
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1638
+
1639
+ >>> inputs = processor(images=image, return_tensors="pt")
1640
+
1641
+ >>> # autoregressive generation
1642
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
1643
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1644
+ >>> print(generated_text)
1645
+ A stop sign is on a street corner.
1646
+
1647
+ >>> # conditional generation
1648
+ >>> text = "A picture of"
1649
+ >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False)
1650
+
1651
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
1652
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1653
+ >>> print(generated_text)
1654
+ A picture of a stop sign with a red stop sign
1655
+ ```
1656
+
1657
+ Training:
1658
+
1659
+ ```python
1660
+ >>> from PIL import Image
1661
+ >>> import requests
1662
+ >>> from transformers import AutoProcessor, Pix2StructForConditionalGeneration
1663
+
1664
+ >>> processor = AutoProcessor.from_pretrained("google/pix2struct-base")
1665
+ >>> model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-base")
1666
+
1667
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
1668
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1669
+ >>> text = "A stop sign is on the street corner."
1670
+
1671
+ >>> inputs = processor(images=image, return_tensors="pt")
1672
+ >>> labels = processor(text=text, return_tensors="pt").input_ids
1673
+
1674
+ >>> # forward pass
1675
+ >>> outputs = model(**inputs, labels=labels)
1676
+ >>> loss = outputs.loss
1677
+ >>> print(f"{loss.item():.5f}")
1678
+ 5.94282
1679
+ ```"""
1680
+ use_cache = use_cache if use_cache is not None else self.config.text_config.use_cache
1681
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1682
+
1683
+ # Encode if needed (training, first prediction pass)
1684
+ if encoder_outputs is None:
1685
+ encoder_outputs = self.encoder(
1686
+ flattened_patches=flattened_patches,
1687
+ attention_mask=attention_mask,
1688
+ head_mask=head_mask,
1689
+ output_attentions=output_attentions,
1690
+ output_hidden_states=output_hidden_states,
1691
+ return_dict=return_dict,
1692
+ )
1693
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1694
+ encoder_outputs = BaseModelOutput(
1695
+ last_hidden_state=encoder_outputs[0],
1696
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1697
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1698
+ )
1699
+
1700
+ hidden_states = encoder_outputs[0]
1701
+
1702
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
1703
+ # get decoder inputs from shifting lm labels to the right
1704
+ decoder_input_ids = self._shift_right(labels)
1705
+ decoder_attention_mask = (
1706
+ decoder_attention_mask
1707
+ if decoder_attention_mask is not None
1708
+ else decoder_input_ids.ne(self.config.pad_token_id).float()
1709
+ )
1710
+ # Always attend to the first token
1711
+ decoder_attention_mask[:, 0] = 1
1712
+
1713
+ # Decode
1714
+ decoder_outputs = self.decoder(
1715
+ input_ids=decoder_input_ids,
1716
+ attention_mask=decoder_attention_mask,
1717
+ inputs_embeds=decoder_inputs_embeds,
1718
+ past_key_values=past_key_values,
1719
+ encoder_hidden_states=hidden_states,
1720
+ encoder_attention_mask=attention_mask,
1721
+ head_mask=decoder_head_mask,
1722
+ cross_attn_head_mask=cross_attn_head_mask,
1723
+ use_cache=use_cache,
1724
+ output_attentions=output_attentions,
1725
+ output_hidden_states=output_hidden_states,
1726
+ labels=labels,
1727
+ return_dict=return_dict,
1728
+ )
1729
+
1730
+ if not return_dict:
1731
+ return decoder_outputs + encoder_outputs
1732
+
1733
+ return Seq2SeqLMOutput(
1734
+ loss=decoder_outputs.loss,
1735
+ logits=decoder_outputs.logits,
1736
+ past_key_values=decoder_outputs.past_key_values,
1737
+ decoder_hidden_states=decoder_outputs.hidden_states,
1738
+ decoder_attentions=decoder_outputs.attentions,
1739
+ cross_attentions=decoder_outputs.cross_attentions,
1740
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1741
+ encoder_hidden_states=encoder_outputs.hidden_states,
1742
+ encoder_attentions=encoder_outputs.attentions,
1743
+ )
1744
+
1745
+ def prepare_inputs_for_generation(
1746
+ self,
1747
+ input_ids,
1748
+ flattened_patches: Optional[torch.FloatTensor] = None,
1749
+ attention_mask: Optional[torch.FloatTensor] = None,
1750
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1751
+ past_key_values=None,
1752
+ head_mask=None,
1753
+ decoder_head_mask=None,
1754
+ cross_attn_head_mask=None,
1755
+ use_cache=None,
1756
+ encoder_outputs=None,
1757
+ **kwargs,
1758
+ ):
1759
+ if decoder_attention_mask is None:
1760
+ decoder_attention_mask = torch.ones_like(input_ids).to(input_ids.device)
1761
+
1762
+ # cut decoder_input_ids if past_key_values is used
1763
+ if past_key_values is not None:
1764
+ past_length = past_key_values[0][0].shape[2]
1765
+
1766
+ # Some generation methods already pass only the last input ID
1767
+ if input_ids.shape[1] > past_length:
1768
+ remove_prefix_length = past_length
1769
+ else:
1770
+ # Default to old behavior: keep only final ID
1771
+ remove_prefix_length = input_ids.shape[1] - 1
1772
+
1773
+ input_ids = input_ids[:, remove_prefix_length:]
1774
+
1775
+ return {
1776
+ "flattened_patches": flattened_patches,
1777
+ "decoder_input_ids": input_ids,
1778
+ "past_key_values": past_key_values,
1779
+ "encoder_outputs": encoder_outputs,
1780
+ "attention_mask": attention_mask,
1781
+ "decoder_attention_mask": decoder_attention_mask,
1782
+ "head_mask": head_mask,
1783
+ "decoder_head_mask": decoder_head_mask,
1784
+ "cross_attn_head_mask": cross_attn_head_mask,
1785
+ "use_cache": use_cache,
1786
+ }
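To make the cache bookkeeping above concrete, here is a minimal sketch (plain PyTorch, made-up shapes) of the prefix-trimming rule in `prepare_inputs_for_generation`: once `past_key_values` already covers `past_length` tokens, only the uncovered tail of the running `decoder_input_ids` is fed back into the decoder.

```python
import torch

# Hypothetical values for illustration: a cache covering 5 generated tokens and
# a running sequence of 6 decoder input ids.
past_length = 5
input_ids = torch.arange(6).unsqueeze(0)  # shape (1, 6)

# Same trimming rule as above: drop the ids already covered by the cache.
if input_ids.shape[1] > past_length:
    remove_prefix_length = past_length
else:
    # fall back to keeping only the final id
    remove_prefix_length = input_ids.shape[1] - 1

print(input_ids[:, remove_prefix_length:])  # tensor([[5]])
```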
llmeval-env/lib/python3.10/site-packages/transformers/models/pix2struct/processing_pix2struct.py ADDED
@@ -0,0 +1,163 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Pix2Struct.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class Pix2StructProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a PIX2STRUCT processor which wraps a T5 tokenizer and PIX2STRUCT image processor into a single
29
+ processor.
30
+
31
+ [`Pix2StructProcessor`] offers all the functionalities of [`Pix2StructImageProcessor`] and [`T5TokenizerFast`]. See
32
+ the docstring of [`~Pix2StructProcessor.__call__`] and [`~Pix2StructProcessor.decode`] for more information.
33
+
34
+ Args:
35
+ image_processor (`Pix2StructImageProcessor`):
36
+ An instance of [`Pix2StructImageProcessor`]. The image processor is a required input.
37
+ tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]):
38
+ An instance of [`T5TokenizerFast`] or [`T5Tokenizer`]. The tokenizer is a required input.
39
+ """
40
+
41
+ attributes = ["image_processor", "tokenizer"]
42
+ image_processor_class = "Pix2StructImageProcessor"
43
+ tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
44
+
45
+ def __init__(self, image_processor, tokenizer):
46
+ tokenizer.return_token_type_ids = False
47
+ super().__init__(image_processor, tokenizer)
48
+
49
+ def __call__(
50
+ self,
51
+ images=None,
52
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
53
+ add_special_tokens: bool = True,
54
+ padding: Union[bool, str, PaddingStrategy] = False,
55
+ truncation: Union[bool, str, TruncationStrategy] = None,
56
+ max_length: Optional[int] = None,
57
+ max_patches: Optional[int] = 2048,
58
+ stride: int = 0,
59
+ pad_to_multiple_of: Optional[int] = None,
60
+ return_attention_mask: Optional[bool] = None,
61
+ return_overflowing_tokens: bool = False,
62
+ return_special_tokens_mask: bool = False,
63
+ return_offsets_mapping: bool = False,
64
+ return_token_type_ids: bool = False,
65
+ return_length: bool = False,
66
+ verbose: bool = True,
67
+ return_tensors: Optional[Union[str, TensorType]] = None,
68
+ **kwargs,
69
+ ) -> BatchEncoding:
70
+ """
71
+ This method uses [`Pix2StructImageProcessor.preprocess`] method to prepare image(s) for the model, and
72
+ [`T5TokenizerFast.__call__`] to prepare text for the model.
73
+
74
+ Please refer to the docstring of the above two methods for more information.
75
+ """
76
+ if images is None and text is None:
77
+ raise ValueError("You have to specify either images or text.")
78
+
79
+ # Get only text
80
+ if images is None and not self.image_processor.is_vqa:
81
+ self.current_processor = self.tokenizer
82
+ text_encoding = self.tokenizer(
83
+ text=text,
84
+ add_special_tokens=add_special_tokens,
85
+ padding=padding,
86
+ truncation=truncation,
87
+ max_length=max_length,
88
+ stride=stride,
89
+ pad_to_multiple_of=pad_to_multiple_of,
90
+ return_attention_mask=return_attention_mask,
91
+ return_overflowing_tokens=return_overflowing_tokens,
92
+ return_special_tokens_mask=return_special_tokens_mask,
93
+ return_offsets_mapping=return_offsets_mapping,
94
+ return_token_type_ids=return_token_type_ids,
95
+ return_length=return_length,
96
+ verbose=verbose,
97
+ return_tensors=return_tensors,
98
+ **kwargs,
99
+ )
100
+ return text_encoding
101
+
102
+ if not self.image_processor.is_vqa:
103
+ # add pixel_values
104
+ encoding_image_processor = self.image_processor(
105
+ images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
106
+ )
107
+ else:
108
+ # add pixel_values and bbox
109
+ encoding_image_processor = self.image_processor(
110
+ images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
111
+ )
112
+
113
+ if text is not None and not self.image_processor.is_vqa:
114
+ text_encoding = self.tokenizer(
115
+ text=text,
116
+ add_special_tokens=add_special_tokens,
117
+ padding=padding,
118
+ truncation=truncation,
119
+ max_length=max_length,
120
+ stride=stride,
121
+ pad_to_multiple_of=pad_to_multiple_of,
122
+ return_attention_mask=return_attention_mask,
123
+ return_overflowing_tokens=return_overflowing_tokens,
124
+ return_special_tokens_mask=return_special_tokens_mask,
125
+ return_offsets_mapping=return_offsets_mapping,
126
+ return_token_type_ids=return_token_type_ids,
127
+ return_length=return_length,
128
+ verbose=verbose,
129
+ return_tensors=return_tensors,
130
+ **kwargs,
131
+ )
132
+
133
+ if "attention_mask" in text_encoding:
134
+ text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
135
+ if "input_ids" in text_encoding:
136
+ text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
137
+ else:
138
+ text_encoding = None
139
+
140
+ if text_encoding is not None:
141
+ encoding_image_processor.update(text_encoding)
142
+
143
+ return encoding_image_processor
144
+
145
+ def batch_decode(self, *args, **kwargs):
146
+ """
147
+ This method forwards all its arguments to T5TokenizerFast's [`~PreTrainedTokenizer.batch_decode`].
148
+ Please refer to the docstring of this method for more information.
149
+ """
150
+ return self.tokenizer.batch_decode(*args, **kwargs)
151
+
152
+ def decode(self, *args, **kwargs):
153
+ """
154
+ This method forwards all its arguments to T5TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please
155
+ refer to the docstring of this method for more information.
156
+ """
157
+ return self.tokenizer.decode(*args, **kwargs)
158
+
159
+ @property
160
+ def model_input_names(self):
161
+ tokenizer_input_names = self.tokenizer.model_input_names
162
+ image_processor_input_names = self.image_processor.model_input_names
163
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
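As a usage sketch for the processor above (reusing the `google/pix2struct-textcaps-base` checkpoint and image URL from the modeling examples earlier), note how a non-VQA call routes the image through the image processor and renames the text encoding to decoder-side keys:

```python
from PIL import Image
import requests
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")

url = "https://www.ilankelman.org/stopsigns/australia.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Image -> `flattened_patches` (+ patch-level `attention_mask`); text -> tokenized
# and renamed to `decoder_input_ids` / `decoder_attention_mask`, per the code above.
inputs = processor(images=image, text="A picture of", return_tensors="pt")
print(sorted(inputs.keys()))
# expected, given the key renaming above:
# ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'flattened_patches']
```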
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__init__.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_sam": [
27
+ "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP",
28
+ "SamConfig",
29
+ "SamMaskDecoderConfig",
30
+ "SamPromptEncoderConfig",
31
+ "SamVisionConfig",
32
+ ],
33
+ "processing_sam": ["SamProcessor"],
34
+ }
35
+
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_sam"] = [
44
+ "SAM_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "SamModel",
46
+ "SamPreTrainedModel",
47
+ ]
48
+ try:
49
+ if not is_tf_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ _import_structure["modeling_tf_sam"] = [
55
+ "TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST",
56
+ "TFSamModel",
57
+ "TFSamPreTrainedModel",
58
+ ]
59
+ try:
60
+ if not is_vision_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ _import_structure["image_processing_sam"] = ["SamImageProcessor"]
66
+
67
+
68
+ if TYPE_CHECKING:
69
+ from .configuration_sam import (
70
+ SAM_PRETRAINED_CONFIG_ARCHIVE_MAP,
71
+ SamConfig,
72
+ SamMaskDecoderConfig,
73
+ SamPromptEncoderConfig,
74
+ SamVisionConfig,
75
+ )
76
+ from .processing_sam import SamProcessor
77
+
78
+ try:
79
+ if not is_torch_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST, SamModel, SamPreTrainedModel
85
+
86
+ try:
87
+ if not is_tf_available():
88
+ raise OptionalDependencyNotAvailable()
89
+ except OptionalDependencyNotAvailable:
90
+ pass
91
+ else:
92
+ from .modeling_tf_sam import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST, TFSamModel, TFSamPreTrainedModel
93
+
94
+ try:
95
+ if not is_vision_available():
96
+ raise OptionalDependencyNotAvailable()
97
+ except OptionalDependencyNotAvailable:
98
+ pass
99
+ else:
100
+ from .image_processing_sam import SamImageProcessor
101
+
102
+ else:
103
+ import sys
104
+
105
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
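The `_LazyModule` registration above defers importing the torch/TF/vision submodules until one of their attributes is actually requested. A standalone toy sketch of the same idea (not the transformers implementation), using only the standard library:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Toy lazy module: maps attribute names to source modules, imported on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"json": ["dumps"]} means `dumps` is resolved lazily from the json module
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


lazy = LazyModule("lazy_std", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(9.0))
```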
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc ADDED
Binary file (5.96 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc ADDED
Binary file (49.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc ADDED
Binary file (48.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc ADDED
Binary file (54.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc ADDED
Binary file (7.47 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py ADDED
@@ -0,0 +1,309 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SAM model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import SAM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class SamPromptEncoderConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`]
31
+ module is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield
32
+ a similar configuration to that of the SAM-vit-h
33
+ [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+ Args:
39
+ hidden_size (`int`, *optional*, defaults to 256):
40
+ Dimensionality of the hidden states.
41
+ image_size (`int`, *optional*, defaults to 1024):
42
+ The expected output resolution of the image.
43
+ patch_size (`int`, *optional*, defaults to 16):
44
+ The size (resolution) of each patch.
45
+ mask_input_channels (`int`, *optional*, defaults to 16):
46
+ The number of channels to be fed to the `MaskDecoder` module.
47
+ num_point_embeddings (`int`, *optional*, defaults to 4):
48
+ The number of point embeddings to be used.
49
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
50
+ The non-linear activation function in the encoder and pooler.
51
+ """
52
+
53
+ def __init__(
54
+ self,
55
+ hidden_size=256,
56
+ image_size=1024,
57
+ patch_size=16,
58
+ mask_input_channels=16,
59
+ num_point_embeddings=4,
60
+ hidden_act="gelu",
61
+ layer_norm_eps=1e-6,
62
+ **kwargs,
63
+ ):
64
+ super().__init__(**kwargs)
65
+ self.hidden_size = hidden_size
66
+ self.image_size = image_size
67
+ self.patch_size = patch_size
68
+ self.image_embedding_size = image_size // patch_size
69
+ self.mask_input_channels = mask_input_channels
70
+ self.num_point_embeddings = num_point_embeddings
71
+ self.hidden_act = hidden_act
72
+ self.layer_norm_eps = layer_norm_eps
73
+
74
+
75
+ class SamMaskDecoderConfig(PretrainedConfig):
76
+ r"""
77
+ This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a SAM
78
+ mask decoder to the specified arguments, defining the model architecture. Instantiating a configuration defaults
79
+ will yield a similar configuration to that of the SAM-vit-h
80
+ [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
81
+
82
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
83
+ documentation from [`PretrainedConfig`] for more information.
84
+
85
+ Args:
86
+ hidden_size (`int`, *optional*, defaults to 256):
87
+ Dimensionality of the hidden states.
88
+ hidden_act (`str`, *optional*, defaults to `"relu"`):
89
+ The non-linear activation function used inside the `SamMaskDecoder` module.
90
+ mlp_dim (`int`, *optional*, defaults to 2048):
91
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
92
+ num_hidden_layers (`int`, *optional*, defaults to 2):
93
+ Number of hidden layers in the Transformer encoder.
94
+ num_attention_heads (`int`, *optional*, defaults to 8):
95
+ Number of attention heads for each attention layer in the Transformer encoder.
96
+ attention_downsample_rate (`int`, *optional*, defaults to 2):
97
+ The downsampling rate of the attention layer.
98
+ num_multimask_outputs (`int`, *optional*, defaults to 3):
99
+ The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3.
100
+ iou_head_depth (`int`, *optional*, defaults to 3):
101
+ The number of layers in the IoU head module.
102
+ iou_head_hidden_dim (`int`, *optional*, defaults to 256):
103
+ The dimensionality of the hidden states in the IoU head module.
104
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
105
+ The epsilon used by the layer normalization layers.
106
+
107
+ """
108
+
109
+ def __init__(
110
+ self,
111
+ hidden_size=256,
112
+ hidden_act="relu",
113
+ mlp_dim=2048,
114
+ num_hidden_layers=2,
115
+ num_attention_heads=8,
116
+ attention_downsample_rate=2,
117
+ num_multimask_outputs=3,
118
+ iou_head_depth=3,
119
+ iou_head_hidden_dim=256,
120
+ layer_norm_eps=1e-6,
121
+ **kwargs,
122
+ ):
123
+ super().__init__(**kwargs)
124
+ self.hidden_size = hidden_size
125
+ self.hidden_act = hidden_act
126
+ self.mlp_dim = mlp_dim
127
+ self.num_hidden_layers = num_hidden_layers
128
+ self.num_attention_heads = num_attention_heads
129
+ self.attention_downsample_rate = attention_downsample_rate
130
+ self.num_multimask_outputs = num_multimask_outputs
131
+ self.iou_head_depth = iou_head_depth
132
+ self.iou_head_hidden_dim = iou_head_hidden_dim
133
+ self.layer_norm_eps = layer_norm_eps
134
+
135
+
136
+ class SamVisionConfig(PretrainedConfig):
137
+ r"""
138
+ This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a SAM
139
+ vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
140
+ defaults will yield a similar configuration to that of the SAM ViT-h
141
+ [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
142
+
143
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
144
+ documentation from [`PretrainedConfig`] for more information.
145
+
146
+ Args:
147
+ hidden_size (`int`, *optional*, defaults to 768):
148
+ Dimensionality of the encoder layers and the pooler layer.
149
+ output_channels (`int`, *optional*, defaults to 256):
150
+ Dimensionality of the output channels in the Patch Encoder.
151
+ num_hidden_layers (`int`, *optional*, defaults to 12):
152
+ Number of hidden layers in the Transformer encoder.
153
+ num_attention_heads (`int`, *optional*, defaults to 12):
154
+ Number of attention heads for each attention layer in the Transformer encoder.
155
+ num_channels (`int`, *optional*, defaults to 3):
156
+ Number of channels in the input image.
157
+ image_size (`int`, *optional*, defaults to 1024):
158
+ Expected resolution. Target size of the resized input image.
159
+ patch_size (`int`, *optional*, defaults to 16):
160
+ Size of the patches to be extracted from the input image.
161
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
162
+ The non-linear activation function (function or string).
163
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
164
+ The epsilon used by the layer normalization layers.
165
+ attention_dropout (`float`, *optional*, defaults to 0.0):
166
+ The dropout ratio for the attention probabilities.
167
+ initializer_range (`float`, *optional*, defaults to 1e-10):
168
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
169
+ qkv_bias (`bool`, *optional*, defaults to `True`):
170
+ Whether to add a bias to query, key, value projections.
171
+ mlp_ratio (`float`, *optional*, defaults to 4.0):
172
+ Ratio of mlp hidden dim to embedding dim.
173
+ use_abs_pos (`bool`, *optional*, defaults to `True`):
174
+ Whether to use absolute position embedding.
175
+ use_rel_pos (`bool`, *optional*, defaults to `True`):
176
+ Whether to use relative position embedding.
177
+ window_size (`int`, *optional*, defaults to 14):
178
+ Window size for relative position.
179
+ global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
180
+ The indexes of the global attention layers.
181
+ num_pos_feats (`int`, *optional*, defaults to 128):
182
+ The dimensionality of the position embedding.
183
+ mlp_dim (`int`, *optional*):
184
+ The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio *
185
+ hidden_size`.
186
+ """
187
+
188
+ def __init__(
189
+ self,
190
+ hidden_size=768,
191
+ output_channels=256,
192
+ num_hidden_layers=12,
193
+ num_attention_heads=12,
194
+ num_channels=3,
195
+ image_size=1024,
196
+ patch_size=16,
197
+ hidden_act="gelu",
198
+ layer_norm_eps=1e-06,
199
+ attention_dropout=0.0,
200
+ initializer_range=1e-10,
201
+ qkv_bias=True,
202
+ mlp_ratio=4.0,
203
+ use_abs_pos=True,
204
+ use_rel_pos=True,
205
+ window_size=14,
206
+ global_attn_indexes=[2, 5, 8, 11],
207
+ num_pos_feats=128,
208
+ mlp_dim=None,
209
+ **kwargs,
210
+ ):
211
+ super().__init__(**kwargs)
212
+
213
+ self.hidden_size = hidden_size
214
+ self.output_channels = output_channels
215
+ self.num_hidden_layers = num_hidden_layers
216
+ self.num_attention_heads = num_attention_heads
217
+ self.num_channels = num_channels
218
+ self.image_size = image_size
219
+ self.patch_size = patch_size
220
+ self.hidden_act = hidden_act
221
+ self.layer_norm_eps = layer_norm_eps
222
+ self.attention_dropout = attention_dropout
223
+ self.initializer_range = initializer_range
224
+ self.qkv_bias = qkv_bias
225
+ self.mlp_ratio = mlp_ratio
226
+ self.use_abs_pos = use_abs_pos
227
+ self.use_rel_pos = use_rel_pos
228
+ self.window_size = window_size
229
+ self.global_attn_indexes = global_attn_indexes
230
+ self.num_pos_feats = num_pos_feats
231
+ self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim
232
+
233
+
234
+ class SamConfig(PretrainedConfig):
235
+ r"""
236
+ [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a
237
+ SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder
238
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
239
+ SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
240
+
241
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
242
+ documentation from [`PretrainedConfig`] for more information.
243
+
244
+ Args:
245
+ vision_config (Union[`dict`, `SamVisionConfig`], *optional*):
246
+ Dictionary of configuration options used to initialize [`SamVisionConfig`].
247
+ prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*):
248
+ Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`].
249
+ mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*):
250
+ Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`].
251
+
252
+ kwargs (*optional*):
253
+ Dictionary of keyword arguments.
254
+
255
+ Example:
256
+
257
+ ```python
258
+ >>> from transformers import (
259
+ ... SamVisionConfig,
260
+ ... SamPromptEncoderConfig,
261
+ ... SamMaskDecoderConfig,
262
+ ... SamModel,
263
+ ... )
264
+
265
+ >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration
266
+ >>> configuration = SamConfig()
267
+
268
+ >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration
269
+ >>> model = SamModel(configuration)
270
+
271
+ >>> # Accessing the model configuration
272
+ >>> configuration = model.config
273
+
274
+ >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig
275
+
276
+ >>> # Initializing SAM vision, prompt encoder and mask decoder configurations
277
+ >>> vision_config = SamVisionConfig()
278
+ >>> prompt_encoder_config = SamPromptEncoderConfig()
279
+ >>> mask_decoder_config = SamMaskDecoderConfig()
280
+
281
+ >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config)
282
+ ```"""
283
+
284
+ model_type = "sam"
285
+
286
+ def __init__(
287
+ self,
288
+ vision_config=None,
289
+ prompt_encoder_config=None,
290
+ mask_decoder_config=None,
291
+ initializer_range=0.02,
292
+ **kwargs,
293
+ ):
294
+ super().__init__(**kwargs)
295
+ vision_config = vision_config if vision_config is not None else {}
296
+ prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
297
+ mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
298
+
299
+ if isinstance(vision_config, SamVisionConfig):
300
+ vision_config = vision_config.to_dict()
301
+ if isinstance(prompt_encoder_config, SamPromptEncoderConfig):
302
+ prompt_encoder_config = prompt_encoder_config.to_dict()
303
+ if isinstance(mask_decoder_config, SamMaskDecoderConfig):
304
+ mask_decoder_config = mask_decoder_config.to_dict()
305
+
306
+ self.vision_config = SamVisionConfig(**vision_config)
307
+ self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config)
308
+ self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config)
309
+ self.initializer_range = initializer_range
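A small sketch composing the configuration classes defined above (configuration only, no weights). The printed values follow from the derived fields set in the `__init__` methods, i.e. `mlp_dim = int(hidden_size * mlp_ratio)` for the vision config and `image_embedding_size = image_size // patch_size` for the prompt encoder config:

```python
from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig

vision_config = SamVisionConfig(hidden_size=768, mlp_ratio=4.0)
prompt_encoder_config = SamPromptEncoderConfig(image_size=1024, patch_size=16)
mask_decoder_config = SamMaskDecoderConfig()

# SamConfig accepts either sub-config instances or plain dicts
# (see the isinstance conversions above).
config = SamConfig(
    vision_config=vision_config,
    prompt_encoder_config=prompt_encoder_config,
    mask_decoder_config=mask_decoder_config,
)

print(config.vision_config.mlp_dim)                       # 3072 == int(768 * 4.0)
print(config.prompt_encoder_config.image_embedding_size)  # 64 == 1024 // 16
```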
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/convert_sam_to_hf.py ADDED
@@ -0,0 +1,250 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Convert SAM checkpoints from the original repository.
17
+
18
+ URL: https://github.com/facebookresearch/segment-anything.
19
+
20
+ Also supports converting the SlimSAM checkpoints from https://github.com/czg1225/SlimSAM/tree/master.
21
+ """
22
+ import argparse
23
+ import re
24
+
25
+ import numpy as np
26
+ import requests
27
+ import torch
28
+ from huggingface_hub import hf_hub_download
29
+ from PIL import Image
30
+
31
+ from transformers import (
32
+ SamConfig,
33
+ SamImageProcessor,
34
+ SamModel,
35
+ SamProcessor,
36
+ SamVisionConfig,
37
+ )
38
+
39
+
40
+ def get_config(model_name):
41
+ if "slimsam-50" in model_name:
42
+ vision_config = SamVisionConfig(
43
+ hidden_size=384,
44
+ mlp_dim=1536,
45
+ num_hidden_layers=12,
46
+ num_attention_heads=12,
47
+ global_attn_indexes=[2, 5, 8, 11],
48
+ )
49
+ elif "slimsam-77" in model_name:
50
+ vision_config = SamVisionConfig(
51
+ hidden_size=168,
52
+ mlp_dim=696,
53
+ num_hidden_layers=12,
54
+ num_attention_heads=12,
55
+ global_attn_indexes=[2, 5, 8, 11],
56
+ )
57
+ elif "sam_vit_b" in model_name:
58
+ vision_config = SamVisionConfig()
59
+ elif "sam_vit_l" in model_name:
60
+ vision_config = SamVisionConfig(
61
+ hidden_size=1024,
62
+ num_hidden_layers=24,
63
+ num_attention_heads=16,
64
+ global_attn_indexes=[5, 11, 17, 23],
65
+ )
66
+ elif "sam_vit_h" in model_name:
67
+ vision_config = SamVisionConfig(
68
+ hidden_size=1280,
69
+ num_hidden_layers=32,
70
+ num_attention_heads=16,
71
+ global_attn_indexes=[7, 15, 23, 31],
72
+ )
73
+
74
+ config = SamConfig(
75
+ vision_config=vision_config,
76
+ )
77
+
78
+ return config
79
+
80
+
81
+ KEYS_TO_MODIFY_MAPPING = {
82
+ "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
83
+ "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
84
+ "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
85
+ "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
86
+ "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
87
+ "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
88
+ "mask_downscaling.0": "mask_embed.conv1",
89
+ "mask_downscaling.1": "mask_embed.layer_norm1",
90
+ "mask_downscaling.3": "mask_embed.conv2",
91
+ "mask_downscaling.4": "mask_embed.layer_norm2",
92
+ "mask_downscaling.6": "mask_embed.conv3",
93
+ "point_embeddings": "point_embed",
94
+ "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
95
+ "image_encoder": "vision_encoder",
96
+ "neck.0": "neck.conv1",
97
+ "neck.1": "neck.layer_norm1",
98
+ "neck.2": "neck.conv2",
99
+ "neck.3": "neck.layer_norm2",
100
+ "patch_embed.proj": "patch_embed.projection",
101
+ ".norm": ".layer_norm",
102
+ "blocks": "layers",
103
+ }
104
+
105
+
106
+ def replace_keys(state_dict):
107
+ model_state_dict = {}
108
+ state_dict.pop("pixel_mean", None)
109
+ state_dict.pop("pixel_std", None)
110
+
111
+ output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
112
+
113
+ for key, value in state_dict.items():
114
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
115
+ if key_to_modify in key:
116
+ key = key.replace(key_to_modify, new_key)
117
+
118
+ if re.match(output_hypernetworks_mlps_pattern, key):
119
+ layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
120
+ if layer_nb == 0:
121
+ key = key.replace("layers.0", "proj_in")
122
+ elif layer_nb == 1:
123
+ key = key.replace("layers.1", "layers.0")
124
+ elif layer_nb == 2:
125
+ key = key.replace("layers.2", "proj_out")
126
+
127
+ model_state_dict[key] = value
128
+
129
+ model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
130
+ "prompt_encoder.shared_embedding.positional_embedding"
131
+ ]
132
+
133
+ return model_state_dict
134
+
135
+
136
+ def convert_sam_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub):
137
+ config = get_config(model_name)
138
+
139
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
140
+ state_dict = replace_keys(state_dict)
141
+
142
+ image_processor = SamImageProcessor()
143
+ processor = SamProcessor(image_processor=image_processor)
144
+ hf_model = SamModel(config)
145
+ hf_model.eval()
146
+
147
+ device = "cuda" if torch.cuda.is_available() else "cpu"
148
+
149
+ hf_model.load_state_dict(state_dict)
150
+ hf_model = hf_model.to(device)
151
+
152
+ img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
153
+ raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
154
+
155
+ input_points = [[[500, 375]]]
156
+ input_labels = [[1]]
157
+
158
+ inputs = processor(images=np.array(raw_image), return_tensors="pt").to(device)
159
+
160
+ with torch.no_grad():
161
+ output = hf_model(**inputs)
162
+ scores = output.iou_scores.squeeze()
163
+
164
+ if model_name == "sam_vit_b_01ec64":
165
+ inputs = processor(
166
+ images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
167
+ ).to(device)
168
+
169
+ with torch.no_grad():
170
+ output = hf_model(**inputs)
171
+ scores = output.iou_scores.squeeze()
172
+
173
+ elif model_name == "sam_vit_h_4b8939":
174
+ inputs = processor(
175
+ images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
176
+ ).to(device)
177
+
178
+ with torch.no_grad():
179
+ output = hf_model(**inputs)
180
+ scores = output.iou_scores.squeeze()
181
+
182
+ assert scores[-1].item() == 0.9712603092193604
183
+
184
+ input_boxes = ((75, 275, 1725, 850),)
185
+
186
+ inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to(device)
187
+
188
+ with torch.no_grad():
189
+ output = hf_model(**inputs)
190
+ scores = output.iou_scores.squeeze()
191
+
192
+ assert scores[-1].item() == 0.8686015605926514
193
+
194
+ # Test with 2 points and 1 image.
195
+ input_points = [[[400, 650], [800, 650]]]
196
+ input_labels = [[1, 1]]
197
+
198
+ inputs = processor(
199
+ images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
200
+ ).to(device)
201
+
202
+ with torch.no_grad():
203
+ output = hf_model(**inputs)
204
+ scores = output.iou_scores.squeeze()
205
+
206
+ assert scores[-1].item() == 0.9936047792434692
207
+
208
+ if pytorch_dump_folder is not None:
209
+ processor.save_pretrained(pytorch_dump_folder)
210
+ hf_model.save_pretrained(pytorch_dump_folder)
211
+
212
+ if push_to_hub:
213
+ repo_id = f"nielsr/{model_name}" if "slimsam" in model_name else f"meta/{model_name}"
214
+ processor.push_to_hub(repo_id)
215
+ hf_model.push_to_hub(repo_id)
216
+
217
+
218
+ if __name__ == "__main__":
219
+ parser = argparse.ArgumentParser()
220
+ choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195", "slimsam-50-uniform", "slimsam-77-uniform"]
221
+ parser.add_argument(
222
+ "--model_name",
223
+ default="sam_vit_h_4b8939",
224
+ choices=choices,
225
+ type=str,
226
+ help="Name of the original model to convert",
227
+ )
228
+ parser.add_argument(
229
+ "--checkpoint_path",
230
+ type=str,
231
+ required=False,
232
+ help="Path to the original checkpoint",
233
+ )
234
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
235
+ parser.add_argument(
236
+ "--push_to_hub",
237
+ action="store_true",
238
+ help="Whether to push the model and processor to the hub after converting",
239
+ )
240
+
241
+ args = parser.parse_args()
242
+
243
+ if "slimsam" in args.model_name:
244
+ checkpoint_path = args.checkpoint_path
245
+ if checkpoint_path is None:
246
+ raise ValueError("You need to provide a checkpoint path for SlimSAM models.")
247
+ else:
248
+ checkpoint_path = hf_hub_download("ybelkada/segment-anything", f"checkpoints/{args.model_name}.pth")
249
+
250
+ convert_sam_checkpoint(args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
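For context, a minimal sketch of how the function above could be driven programmatically instead of via `argparse`; the output folder name is a placeholder:

from huggingface_hub import hf_hub_download

ckpt = hf_hub_download("ybelkada/segment-anything", "checkpoints/sam_vit_b_01ec64.pth")
convert_sam_checkpoint("sam_vit_b_01ec64", ckpt, "./sam-vit-base-converted", push_to_hub=False)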
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py ADDED
@@ -0,0 +1,1496 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for SAM."""
16
+ import math
17
+ from copy import deepcopy
18
+ from itertools import product
19
+ from typing import Any, Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
24
+ from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
25
+ from ...image_utils import (
26
+ IMAGENET_DEFAULT_MEAN,
27
+ IMAGENET_DEFAULT_STD,
28
+ ChannelDimension,
29
+ ImageInput,
30
+ PILImageResampling,
31
+ get_image_size,
32
+ infer_channel_dimension_format,
33
+ is_scaled_image,
34
+ make_list_of_images,
35
+ to_numpy_array,
36
+ valid_images,
37
+ validate_kwargs,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import (
41
+ TensorType,
42
+ is_tf_available,
43
+ is_torch_available,
44
+ is_torchvision_available,
45
+ logging,
46
+ requires_backends,
47
+ )
48
+
49
+
50
+ if is_torch_available():
51
+ import torch
52
+ import torch.nn.functional as F
53
+
54
+ if is_torchvision_available():
55
+ from torchvision.ops.boxes import batched_nms
56
+
57
+ if is_tf_available():
58
+ import tensorflow as tf
59
+ from tensorflow.experimental import numpy as tnp
60
+
61
+ from ...tf_utils import flatten, shape_list
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+
66
+ class SamImageProcessor(BaseImageProcessor):
67
+ r"""
68
+ Constructs a SAM image processor.
69
+
70
+ Args:
71
+ do_resize (`bool`, *optional*, defaults to `True`):
72
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
73
+ `do_resize` parameter in the `preprocess` method.
74
+ size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`):
75
+ Size of the output image after resizing. Resizes the longest edge of the image to match
76
+ `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the
77
+ `preprocess` method.
78
+ mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`):
79
+ Size of the output segmentation map after resizing. Resizes the longest edge of the image to match
80
+ `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size` parameter
81
+ in the `preprocess` method.
82
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
83
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
84
+ `preprocess` method.
85
+ do_rescale (`bool`, *optional*, defaults to `True`):
86
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
87
+ `do_rescale` parameter in the `preprocess` method.
88
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
89
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
90
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
91
+ do_normalize (`bool`, *optional*, defaults to `True`):
92
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
94
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
95
+ Mean to use if normalizing the image. This is a float or list of floats with the same length as the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
98
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
99
+ Standard deviation to use if normalizing the image. This is a float or list of floats with the same length as
+ the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
102
+ do_pad (`bool`, *optional*, defaults to `True`):
103
+ Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the
104
+ `preprocess` method.
105
+ pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
106
+ Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess`
107
+ method.
108
+ mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`):
109
+ Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in
110
+ the `preprocess` method.
111
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
112
+ Whether to convert the image to RGB.
113
+ """
114
+
115
+ model_input_names = ["pixel_values"]
116
+
117
+ def __init__(
118
+ self,
119
+ do_resize: bool = True,
120
+ size: Dict[str, int] = None,
121
+ mask_size: Dict[str, int] = None,
122
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
123
+ do_rescale: bool = True,
124
+ rescale_factor: Union[int, float] = 1 / 255,
125
+ do_normalize: bool = True,
126
+ image_mean: Optional[Union[float, List[float]]] = None,
127
+ image_std: Optional[Union[float, List[float]]] = None,
128
+ do_pad: bool = True,
129
+ pad_size: int = None,
130
+ mask_pad_size: int = None,
131
+ do_convert_rgb: bool = True,
132
+ **kwargs,
133
+ ) -> None:
134
+ super().__init__(**kwargs)
135
+ size = size if size is not None else {"longest_edge": 1024}
136
+ size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size
137
+
138
+ pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024}
139
+ pad_size = get_size_dict(pad_size, default_to_square=True)
140
+
141
+ mask_size = mask_size if mask_size is not None else {"longest_edge": 256}
142
+ mask_size = (
143
+ get_size_dict(max_size=mask_size, default_to_square=False)
144
+ if not isinstance(mask_size, dict)
145
+ else mask_size
146
+ )
147
+
148
+ mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256}
149
+ mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)
150
+
151
+ self.do_resize = do_resize
152
+ self.size = size
153
+ self.mask_size = mask_size
154
+ self.resample = resample
155
+ self.do_rescale = do_rescale
156
+ self.rescale_factor = rescale_factor
157
+ self.do_normalize = do_normalize
158
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
159
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
160
+ self.do_pad = do_pad
161
+ self.pad_size = pad_size
162
+ self.mask_pad_size = mask_pad_size
163
+ self.do_convert_rgb = do_convert_rgb
164
+ self._valid_processor_keys = [
165
+ "images",
166
+ "segmentation_maps",
167
+ "do_resize",
168
+ "size",
169
+ "mask_size",
170
+ "resample",
171
+ "do_rescale",
172
+ "rescale_factor",
173
+ "do_normalize",
174
+ "image_mean",
175
+ "image_std",
176
+ "do_pad",
177
+ "pad_size",
178
+ "mask_pad_size",
179
+ "do_convert_rgb",
180
+ "return_tensors",
181
+ "data_format",
182
+ "input_data_format",
183
+ ]
184
+
185
+ def pad_image(
186
+ self,
187
+ image: np.ndarray,
188
+ pad_size: Dict[str, int],
189
+ data_format: Optional[Union[str, ChannelDimension]] = None,
190
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
191
+ **kwargs,
192
+ ) -> np.ndarray:
193
+ """
194
+ Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom.
195
+
196
+ Args:
197
+ image (`np.ndarray`):
198
+ Image to pad.
199
+ pad_size (`Dict[str, int]`):
200
+ Size of the output image after padding.
201
+ data_format (`str` or `ChannelDimension`, *optional*):
202
+ The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the
203
+ `data_format` of the `image` will be used.
204
+ input_data_format (`str` or `ChannelDimension`, *optional*):
205
+ The channel dimension format of the input image. If not provided, it will be inferred.
206
+ """
207
+ output_height, output_width = pad_size["height"], pad_size["width"]
208
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
209
+
210
+ pad_width = output_width - input_width
211
+ pad_height = output_height - input_height
212
+
213
+ padded_image = pad(
214
+ image,
215
+ ((0, pad_height), (0, pad_width)),
216
+ data_format=data_format,
217
+ input_data_format=input_data_format,
218
+ **kwargs,
219
+ )
220
+ return padded_image
221
+
222
+ def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int):
223
+ """
224
+ Compute the output size given input size and target long side length.
225
+ """
226
+ oldh, oldw = old_shape
227
+ scale = longest_edge * 1.0 / max(oldh, oldw)
228
+ newh, neww = oldh * scale, oldw * scale
229
+ newh = int(newh + 0.5)
230
+ neww = int(neww + 0.5)
231
+ return (newh, neww)
232
+
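A quick worked example of `_get_preprocess_shape` (numbers chosen purely for illustration): for a 480 x 640 image and `longest_edge=1024`, the scale is 1024 / 640 = 1.6, giving an output size of (768, 1024).

image_processor = SamImageProcessor()
assert image_processor._get_preprocess_shape((480, 640), longest_edge=1024) == (768, 1024)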
233
+ def resize(
234
+ self,
235
+ image: np.ndarray,
236
+ size: Dict[str, int],
237
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
238
+ data_format: Optional[Union[str, ChannelDimension]] = None,
239
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
240
+ **kwargs,
241
+ ) -> np.ndarray:
242
+ """
243
+ Resize an image so that its longest edge matches `size["longest_edge"]`, preserving the aspect ratio.
244
+
245
+ Args:
246
+ image (`np.ndarray`):
247
+ Image to resize.
248
+ size (`Dict[str, int]`):
249
+ Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest
250
+ edge of the image will be resized to the specified size, while the other edge will be resized to
251
+ maintain the aspect ratio.
252
+ resample:
253
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
254
+ data_format (`ChannelDimension` or `str`, *optional*):
255
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
256
+ image is used. Can be one of:
257
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
258
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
259
+ input_data_format (`ChannelDimension` or `str`, *optional*):
260
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
261
+ from the input image. Can be one of:
262
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
263
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
264
+
265
+ Returns:
266
+ `np.ndarray`: The resized image.
267
+ """
268
+ size = get_size_dict(size)
269
+ if "longest_edge" not in size:
270
+ raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}")
271
+ input_size = get_image_size(image, channel_dim=input_data_format)
272
+ output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"])
273
+ return resize(
274
+ image,
275
+ size=(output_height, output_width),
276
+ resample=resample,
277
+ data_format=data_format,
278
+ input_data_format=input_data_format,
279
+ **kwargs,
280
+ )
281
+
282
+ def _preprocess(
283
+ self,
284
+ image: ImageInput,
285
+ do_resize: bool,
286
+ do_rescale: bool,
287
+ do_normalize: bool,
288
+ size: Optional[Dict[str, int]] = None,
289
+ resample: PILImageResampling = None,
290
+ rescale_factor: Optional[float] = None,
291
+ image_mean: Optional[Union[float, List[float]]] = None,
292
+ image_std: Optional[Union[float, List[float]]] = None,
293
+ do_pad: Optional[bool] = None,
294
+ pad_size: Optional[Dict[str, int]] = None,
295
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
296
+ ):
297
+ if do_resize:
298
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
299
+ reshaped_input_size = get_image_size(image, channel_dim=input_data_format)
300
+
301
+ if do_rescale:
302
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
303
+
304
+ if do_normalize:
305
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
306
+
307
+ if do_pad:
308
+ image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format)
309
+
310
+ return image, reshaped_input_size
311
+
312
+ def _preprocess_image(
313
+ self,
314
+ image: ImageInput,
315
+ do_resize: Optional[bool] = None,
316
+ size: Dict[str, int] = None,
317
+ resample: PILImageResampling = None,
318
+ do_rescale: bool = None,
319
+ rescale_factor: Optional[float] = None,
320
+ do_normalize: Optional[bool] = None,
321
+ image_mean: Optional[Union[float, List[float]]] = None,
322
+ image_std: Optional[Union[float, List[float]]] = None,
323
+ do_pad: Optional[bool] = None,
324
+ pad_size: Optional[Dict[str, int]] = None,
325
+ do_convert_rgb: Optional[bool] = None,
326
+ data_format: Optional[Union[str, ChannelDimension]] = None,
327
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
328
+ ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]:
329
+ image = to_numpy_array(image)
330
+
331
+ # PIL RGBA images are converted to RGB
332
+ if do_convert_rgb:
333
+ image = convert_to_rgb(image)
334
+
335
+ # All transformations expect numpy arrays.
336
+ image = to_numpy_array(image)
337
+
338
+ if is_scaled_image(image) and do_rescale:
339
+ logger.warning_once(
340
+ "It looks like you are trying to rescale already rescaled images. If the input"
341
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
342
+ )
343
+
344
+ if input_data_format is None:
345
+ input_data_format = infer_channel_dimension_format(image)
346
+
347
+ original_size = get_image_size(image, channel_dim=input_data_format)
348
+
349
+ image, reshaped_input_size = self._preprocess(
350
+ image=image,
351
+ do_resize=do_resize,
352
+ size=size,
353
+ resample=resample,
354
+ do_rescale=do_rescale,
355
+ rescale_factor=rescale_factor,
356
+ do_normalize=do_normalize,
357
+ image_mean=image_mean,
358
+ image_std=image_std,
359
+ do_pad=do_pad,
360
+ pad_size=pad_size,
361
+ input_data_format=input_data_format,
362
+ )
363
+
364
+ if data_format is not None:
365
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
366
+
367
+ return image, original_size, reshaped_input_size
368
+
369
+ def _preprocess_mask(
370
+ self,
371
+ segmentation_map: ImageInput,
372
+ do_resize: Optional[bool] = None,
373
+ mask_size: Dict[str, int] = None,
374
+ do_pad: Optional[bool] = None,
375
+ mask_pad_size: Optional[Dict[str, int]] = None,
376
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
377
+ ) -> np.ndarray:
378
+ segmentation_map = to_numpy_array(segmentation_map)
379
+
380
+ # Add channel dimension if missing - needed for certain transformations
381
+ if segmentation_map.ndim == 2:
382
+ added_channel_dim = True
383
+ segmentation_map = segmentation_map[None, ...]
384
+ input_data_format = ChannelDimension.FIRST
385
+ else:
386
+ added_channel_dim = False
387
+ if input_data_format is None:
388
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
389
+
390
+ original_size = get_image_size(segmentation_map, channel_dim=input_data_format)
391
+
392
+ segmentation_map, _ = self._preprocess(
393
+ image=segmentation_map,
394
+ do_resize=do_resize,
395
+ size=mask_size,
396
+ resample=PILImageResampling.NEAREST,
397
+ do_rescale=False,
398
+ do_normalize=False,
399
+ do_pad=do_pad,
400
+ pad_size=mask_pad_size,
401
+ input_data_format=input_data_format,
402
+ )
403
+
404
+ # Remove extra channel dimension if added for processing
405
+ if added_channel_dim:
406
+ segmentation_map = segmentation_map.squeeze(0)
407
+ segmentation_map = segmentation_map.astype(np.int64)
408
+
409
+ return segmentation_map, original_size
410
+
411
+ def preprocess(
412
+ self,
413
+ images: ImageInput,
414
+ segmentation_maps: Optional[ImageInput] = None,
415
+ do_resize: Optional[bool] = None,
416
+ size: Optional[Dict[str, int]] = None,
417
+ mask_size: Optional[Dict[str, int]] = None,
418
+ resample: Optional["PILImageResampling"] = None,
419
+ do_rescale: Optional[bool] = None,
420
+ rescale_factor: Optional[Union[int, float]] = None,
421
+ do_normalize: Optional[bool] = None,
422
+ image_mean: Optional[Union[float, List[float]]] = None,
423
+ image_std: Optional[Union[float, List[float]]] = None,
424
+ do_pad: Optional[bool] = None,
425
+ pad_size: Optional[Dict[str, int]] = None,
426
+ mask_pad_size: Optional[Dict[str, int]] = None,
427
+ do_convert_rgb: Optional[bool] = None,
428
+ return_tensors: Optional[Union[str, TensorType]] = None,
429
+ data_format: ChannelDimension = ChannelDimension.FIRST,
430
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
431
+ **kwargs,
432
+ ):
433
+ """
434
+ Preprocess an image or batch of images.
435
+
436
+ Args:
437
+ images (`ImageInput`):
438
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
439
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
440
+ segmentation_maps (`ImageInput`, *optional*):
441
+ Segmentation map to preprocess.
442
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
443
+ Whether to resize the image.
444
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
445
+ Controls the size of the image after `resize`. The longest edge of the image is resized to
446
+ `size["longest_edge"]` whilst preserving the aspect ratio.
447
+ mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`):
448
+ Controls the size of the segmentation map after `resize`. The longest edge of the image is resized to
449
+ `size["longest_edge"]` whilst preserving the aspect ratio.
450
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
451
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
452
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
453
+ Whether to rescale the image pixel values by rescaling factor.
454
+ rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
455
+ Rescale factor to apply to the image pixel values.
456
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
457
+ Whether to normalize the image.
458
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
459
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
460
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
461
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
462
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
463
+ Whether to pad the image.
464
+ pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`):
465
+ Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and
466
+ `pad_size["width"]` if `do_pad` is set to `True`.
467
+ mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`):
468
+ Controls the size of the padding applied to the segmentation map. The image is padded to
469
+ `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`.
470
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
471
+ Whether to convert the image to RGB.
472
+ return_tensors (`str` or `TensorType`, *optional*):
473
+ The type of tensors to return. Can be one of:
474
+ - Unset: Return a list of `np.ndarray`.
475
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
476
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
477
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
478
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
479
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
480
+ The channel dimension format for the output image. Can be one of:
481
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
482
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
483
+ - Unset: Use the channel dimension format of the input image.
484
+ input_data_format (`ChannelDimension` or `str`, *optional*):
485
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
486
+ from the input image. Can be one of:
487
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
488
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
489
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
490
+ """
491
+ do_resize = do_resize if do_resize is not None else self.do_resize
492
+ size = size if size is not None else self.size
493
+ size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size
494
+ mask_size = mask_size if mask_size is not None else self.mask_size
495
+ mask_size = (
496
+ get_size_dict(max_size=mask_size, default_to_square=False)
497
+ if not isinstance(mask_size, dict)
498
+ else mask_size
499
+ )
500
+ resample = resample if resample is not None else self.resample
501
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
502
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
503
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
504
+ image_mean = image_mean if image_mean is not None else self.image_mean
505
+ image_std = image_std if image_std is not None else self.image_std
506
+ do_pad = do_pad if do_pad is not None else self.do_pad
507
+ pad_size = pad_size if pad_size is not None else self.pad_size
508
+ pad_size = get_size_dict(pad_size, default_to_square=True)
509
+ mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size
510
+ mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)
511
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
512
+
513
+ images = make_list_of_images(images)
514
+
515
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
516
+
517
+ if not valid_images(images):
518
+ raise ValueError(
519
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
520
+ "torch.Tensor, tf.Tensor or jax.ndarray."
521
+ )
522
+
523
+ if segmentation_maps is not None:
524
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
525
+
526
+ if not valid_images(segmentation_maps):
527
+ raise ValueError(
528
+ "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
529
+ "torch.Tensor, tf.Tensor or jax.ndarray."
530
+ )
531
+ validate_preprocess_arguments(
532
+ do_rescale=do_rescale,
533
+ rescale_factor=rescale_factor,
534
+ do_normalize=do_normalize,
535
+ image_mean=image_mean,
536
+ image_std=image_std,
537
+ do_pad=do_pad,
538
+ size_divisibility=pad_size, # Here _preprocess needs do_pad and pad_size.
539
+ do_resize=do_resize,
540
+ size=size,
541
+ resample=resample,
542
+ )
543
+
544
+ images, original_sizes, reshaped_input_sizes = zip(
545
+ *(
546
+ self._preprocess_image(
547
+ image=img,
548
+ do_resize=do_resize,
549
+ size=size,
550
+ resample=resample,
551
+ do_rescale=do_rescale,
552
+ rescale_factor=rescale_factor,
553
+ do_normalize=do_normalize,
554
+ image_mean=image_mean,
555
+ image_std=image_std,
556
+ do_pad=do_pad,
557
+ pad_size=pad_size,
558
+ do_convert_rgb=do_convert_rgb,
559
+ data_format=data_format,
560
+ input_data_format=input_data_format,
561
+ )
562
+ for img in images
563
+ )
564
+ )
565
+
566
+ data = {
567
+ "pixel_values": images,
568
+ "original_sizes": original_sizes,
569
+ "reshaped_input_sizes": reshaped_input_sizes,
570
+ }
571
+
572
+ if segmentation_maps is not None:
573
+ segmentation_maps, original_mask_sizes = zip(
574
+ *(
575
+ self._preprocess_mask(
576
+ segmentation_map=mask,
577
+ do_resize=do_resize,
578
+ mask_size=mask_size,
579
+ do_pad=do_pad,
580
+ mask_pad_size=mask_pad_size,
581
+ input_data_format=input_data_format,
582
+ )
583
+ for mask in segmentation_maps
584
+ )
585
+ )
586
+
587
+ # masks should start out the same size as input images
588
+ assert all(
589
+ original_im_size == original_mask_size
590
+ for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes)
591
+ ), "Segmentation maps should be the same size as input images."
592
+
593
+ data["labels"] = segmentation_maps
594
+
595
+ return BatchFeature(data=data, tensor_type=return_tensors)
596
+
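A minimal usage sketch of the `preprocess` entry point under default settings; the random image below is synthetic, and the commented shapes follow from the longest-edge resize plus bottom/right padding described above:

import numpy as np

image_processor = SamImageProcessor()
dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
encoded = image_processor.preprocess(images=dummy_image, return_tensors="pt")
# encoded["pixel_values"].shape     -> (1, 3, 1024, 1024)
# encoded["original_sizes"]         -> contains [[480, 640]]
# encoded["reshaped_input_sizes"]   -> contains [[768, 1024]]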
597
+ def post_process_masks(
598
+ self,
599
+ masks,
600
+ original_sizes,
601
+ reshaped_input_sizes,
602
+ mask_threshold=0.0,
603
+ binarize=True,
604
+ pad_size=None,
605
+ return_tensors="pt",
606
+ ):
607
+ """
608
+ Remove padding and upscale masks to the original image size.
609
+
610
+ Args:
611
+ masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`):
612
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
613
+ original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
614
+ The original sizes of each image before it was resized to the model's expected input shape, in (height,
615
+ width) format.
616
+ reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
617
+ The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
618
+ mask_threshold (`float`, *optional*, defaults to 0.0):
619
+ The threshold to use for binarizing the masks.
620
+ binarize (`bool`, *optional*, defaults to `True`):
621
+ Whether to binarize the masks.
622
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
623
+ The target size the images were padded to before being passed to the model. If None, the target size is
624
+ assumed to be the processor's `pad_size`.
625
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
626
+ If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors.
627
+ Returns:
628
+ (`Union[torch.Tensor, tf.Tensor]`): Batched masks in (batch_size, num_channels, height, width) format, where
629
+ (height, width) is given by original_size.
630
+ """
631
+ if return_tensors == "pt":
632
+ return self._post_process_masks_pt(
633
+ masks=masks,
634
+ original_sizes=original_sizes,
635
+ reshaped_input_sizes=reshaped_input_sizes,
636
+ mask_threshold=mask_threshold,
637
+ binarize=binarize,
638
+ pad_size=pad_size,
639
+ )
640
+ elif return_tensors == "tf":
641
+ return self._post_process_masks_tf(
642
+ masks=masks,
643
+ original_sizes=original_sizes,
644
+ reshaped_input_sizes=reshaped_input_sizes,
645
+ mask_threshold=mask_threshold,
646
+ binarize=binarize,
647
+ pad_size=pad_size,
648
+ )
649
+ else:
650
+ raise ValueError("return_tensors must be either 'pt' or 'tf'")
651
+
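To show how `preprocess` and `post_process_masks` fit together, a rough end-to-end sketch; the checkpoint name is an assumption and not part of this file, and `dummy_image` is the synthetic array from the earlier sketch:

import torch
from transformers import SamImageProcessor, SamModel

image_processor = SamImageProcessor()
model = SamModel.from_pretrained("facebook/sam-vit-base")  # assumed Hub checkpoint

inputs = image_processor.preprocess(images=dummy_image, return_tensors="pt")
with torch.no_grad():
    outputs = model(pixel_values=inputs["pixel_values"])

masks = image_processor.post_process_masks(
    outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
)
# `masks` is a list with one boolean tensor per image, upscaled back to the original (height, width).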
652
+ def _post_process_masks_pt(
653
+ self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
654
+ ):
655
+ """
656
+ Remove padding and upscale masks to the original image size.
657
+
658
+ Args:
659
+ masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
660
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
661
+ original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
662
+ The original sizes of each image before it was resized to the model's expected input shape, in (height,
663
+ width) format.
664
+ reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
665
+ The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
666
+ mask_threshold (`float`, *optional*, defaults to 0.0):
667
+ The threshold to use for binarizing the masks.
668
+ binarize (`bool`, *optional*, defaults to `True`):
669
+ Whether to binarize the masks.
670
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
671
+ The target size the images were padded to before being passed to the model. If None, the target size is
672
+ assumed to be the processor's `pad_size`.
673
+ Returns:
674
+ (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
675
+ is given by original_size.
676
+ """
677
+ requires_backends(self, ["torch"])
678
+ pad_size = self.pad_size if pad_size is None else pad_size
679
+ target_image_size = (pad_size["height"], pad_size["width"])
680
+ if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
681
+ original_sizes = original_sizes.tolist()
682
+ if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
683
+ reshaped_input_sizes = reshaped_input_sizes.tolist()
684
+ output_masks = []
685
+ for i, original_size in enumerate(original_sizes):
686
+ if isinstance(masks[i], np.ndarray):
687
+ masks[i] = torch.from_numpy(masks[i])
688
+ elif not isinstance(masks[i], torch.Tensor):
689
+ raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
690
+ interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
691
+ interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
692
+ interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
693
+ if binarize:
694
+ interpolated_mask = interpolated_mask > mask_threshold
695
+ output_masks.append(interpolated_mask)
696
+
697
+ return output_masks
698
+
699
+ def _post_process_masks_tf(
700
+ self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
701
+ ):
702
+ """
703
+ Remove padding and upscale masks to the original image size.
704
+
705
+ Args:
706
+ masks (`tf.Tensor`):
707
+ Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
708
+ original_sizes (`tf.Tensor`):
709
+ The original size of the images before resizing for input to the model, in (height, width) format.
710
+ reshaped_input_sizes (`tf.Tensor`):
711
+ The size of the image input to the model, in (height, width) format. Used to remove padding.
712
+ mask_threshold (`float`, *optional*, defaults to 0.0):
713
+ The threshold to use for binarizing the masks.
714
+ binarize (`bool`, *optional*, defaults to `True`):
715
+ Whether to binarize the masks.
716
+ pad_size (`int`, *optional*, defaults to `self.pad_size`):
717
+ The target size the images were padded to before being passed to the model. If None, the target size is
718
+ assumed to be the processor's `pad_size`.
719
+ Returns:
720
+ (`tf.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is
721
+ given by original_size.
722
+ """
723
+ requires_backends(self, ["tf"])
724
+ pad_size = self.pad_size if pad_size is None else pad_size
725
+ target_image_size = (pad_size["height"], pad_size["width"])
726
+
727
+ output_masks = []
728
+ for i, original_size in enumerate(original_sizes):
729
+ # tf.image expects NHWC, we transpose the NCHW inputs for it
730
+ mask = tf.transpose(masks[i], perm=[0, 2, 3, 1])
731
+ interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear")
732
+ interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :]
733
+ interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear")
734
+ if binarize:
735
+ interpolated_mask = interpolated_mask > mask_threshold
736
+ # And then we transpose them back at the end
737
+ output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2]))
738
+
739
+ return output_masks
740
+
741
+ def post_process_for_mask_generation(
742
+ self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt"
743
+ ):
744
+ """
745
+ Post-processes the masks generated by the model by applying the Non-Maximum Suppression algorithm to the predicted masks.
746
+
747
+ Args:
748
+ all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`):
749
+ List of all predicted segmentation masks
750
+ all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`):
751
+ List of all predicted iou scores
752
+ all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`):
753
+ List of all bounding boxes of the predicted masks
754
+ crops_nms_thresh (`float`):
755
+ Threshold for NMS (Non Maximum Suppression) algorithm.
756
+ return_tensors (`str`, *optional*, defaults to `pt`):
757
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
758
+ """
759
+ if return_tensors == "pt":
760
+ return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh)
761
+ elif return_tensors == "tf":
762
+ return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh)
763
+
764
+ def generate_crop_boxes(
765
+ self,
766
+ image,
767
+ target_size,
768
+ crop_n_layers: int = 0,
769
+ overlap_ratio: float = 512 / 1500,
770
+ points_per_crop: Optional[int] = 32,
771
+ crop_n_points_downscale_factor: Optional[List[int]] = 1,
772
+ device: Optional["torch.device"] = None,
773
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
774
+ return_tensors: str = "pt",
775
+ ):
776
+ """
777
+ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
778
+
779
+ Args:
780
+ image (`np.array`):
781
+ Input original image
782
+ target_size (`int`):
783
+ Target size of the resized image
784
+ crop_n_layers (`int`, *optional*, defaults to 0):
785
+ If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
786
+ each layer has 2**i_layer number of image crops.
787
+ overlap_ratio (`float`, *optional*, defaults to 512/1500):
788
+ Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
789
+ the image length. Later layers with more crops scale down this overlap.
790
+ points_per_crop (`int`, *optional*, defaults to 32):
791
+ Number of points to sample from each crop.
792
+ crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1):
793
+ The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
794
+ device (`torch.device`, *optional*, defaults to None):
795
+ Device to use for the computation. If None, cpu will be used.
796
+ input_data_format (`str` or `ChannelDimension`, *optional*):
797
+ The channel dimension format of the input image. If not provided, it will be inferred.
798
+ return_tensors (`str`, *optional*, defaults to `pt`):
799
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
800
+ """
801
+ crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
802
+ image,
803
+ target_size,
804
+ crop_n_layers,
805
+ overlap_ratio,
806
+ points_per_crop,
807
+ crop_n_points_downscale_factor,
808
+ input_data_format,
809
+ )
810
+ if return_tensors == "pt":
811
+ if device is None:
812
+ device = torch.device("cpu")
813
+ crop_boxes = torch.tensor(crop_boxes, device=device)
814
+ points_per_crop = torch.tensor(points_per_crop, device=device)
815
+ # cropped_images stays as np
816
+ input_labels = torch.tensor(input_labels, device=device)
817
+
818
+ elif return_tensors == "tf":
819
+ if device is not None:
820
+ raise ValueError("device is not a supported argument when return_tensors is tf!")
821
+ crop_boxes = tf.convert_to_tensor(crop_boxes)
822
+ points_per_crop = tf.convert_to_tensor(points_per_crop)
823
+ # cropped_images stays as np
824
+ input_labels = tf.convert_to_tensor(input_labels)
825
+ else:
826
+ raise ValueError("return_tensors must be either 'pt' or 'tf'.")
827
+ return crop_boxes, points_per_crop, cropped_images, input_labels
828
+
829
+ def filter_masks(
830
+ self,
831
+ masks,
832
+ iou_scores,
833
+ original_size,
834
+ cropped_box_image,
835
+ pred_iou_thresh=0.88,
836
+ stability_score_thresh=0.95,
837
+ mask_threshold=0,
838
+ stability_score_offset=1,
839
+ return_tensors="pt",
840
+ ):
841
+ """
842
+ Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is
+ that the IoU scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability
+ scores need to be greater than `stability_score_thresh`. The method also converts the predicted masks to
+ bounding boxes and pads the predicted masks if necessary.
846
+
847
+ Args:
848
+ masks (`Union[torch.Tensor, tf.Tensor]`):
849
+ Input masks.
850
+ iou_scores (`Union[torch.Tensor, tf.Tensor]`):
851
+ List of IoU scores.
852
+ original_size (`Tuple[int,int]`):
853
+ Size of the original image.
854
+ cropped_box_image (`np.array`):
855
+ The cropped image.
856
+ pred_iou_thresh (`float`, *optional*, defaults to 0.88):
857
+ The threshold for the iou scores.
858
+ stability_score_thresh (`float`, *optional*, defaults to 0.95):
859
+ The threshold for the stability score.
860
+ mask_threshold (`float`, *optional*, defaults to 0):
861
+ The threshold for the predicted masks.
862
+ stability_score_offset (`float`, *optional*, defaults to 1):
863
+ The offset for the stability score used in the `_compute_stability_score` method.
864
+ return_tensors (`str`, *optional*, defaults to `pt`):
865
+ If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
866
+ """
867
+ if return_tensors == "pt":
868
+ return self._filter_masks_pt(
869
+ masks=masks,
870
+ iou_scores=iou_scores,
871
+ original_size=original_size,
872
+ cropped_box_image=cropped_box_image,
873
+ pred_iou_thresh=pred_iou_thresh,
874
+ stability_score_thresh=stability_score_thresh,
875
+ mask_threshold=mask_threshold,
876
+ stability_score_offset=stability_score_offset,
877
+ )
878
+ elif return_tensors == "tf":
879
+ return self._filter_masks_tf(
880
+ masks=masks,
881
+ iou_scores=iou_scores,
882
+ original_size=original_size,
883
+ cropped_box_image=cropped_box_image,
884
+ pred_iou_thresh=pred_iou_thresh,
885
+ stability_score_thresh=stability_score_thresh,
886
+ mask_threshold=mask_threshold,
887
+ stability_score_offset=stability_score_offset,
888
+ )
889
+
890
+ def _filter_masks_pt(
891
+ self,
892
+ masks,
893
+ iou_scores,
894
+ original_size,
895
+ cropped_box_image,
896
+ pred_iou_thresh=0.88,
897
+ stability_score_thresh=0.95,
898
+ mask_threshold=0,
899
+ stability_score_offset=1,
900
+ ):
901
+ """
902
+ Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is
+ that the IoU scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability
+ scores need to be greater than `stability_score_thresh`. The method also converts the predicted masks to
+ bounding boxes and pads the predicted masks if necessary.
906
+
907
+ Args:
908
+ masks (`torch.Tensor`):
909
+ Input masks.
910
+ iou_scores (`torch.Tensor`):
911
+ List of IoU scores.
912
+ original_size (`Tuple[int,int]`):
913
+ Size of the original image.
914
+ cropped_box_image (`np.array`):
915
+ The cropped image.
916
+ pred_iou_thresh (`float`, *optional*, defaults to 0.88):
917
+ The threshold for the iou scores.
918
+ stability_score_thresh (`float`, *optional*, defaults to 0.95):
919
+ The threshold for the stability score.
920
+ mask_threshold (`float`, *optional*, defaults to 0):
921
+ The threshold for the predicted masks.
922
+ stability_score_offset (`float`, *optional*, defaults to 1):
923
+ The offset for the stability score used in the `_compute_stability_score` method.
924
+
925
+ """
926
+ requires_backends(self, ["torch"])
927
+ original_height, original_width = original_size
928
+ iou_scores = iou_scores.flatten(0, 1)
929
+ masks = masks.flatten(0, 1)
930
+
931
+ if masks.shape[0] != iou_scores.shape[0]:
932
+ raise ValueError("masks and iou_scores must have the same batch size.")
933
+
934
+ if masks.device != iou_scores.device:
935
+ iou_scores = iou_scores.to(masks.device)
936
+
937
+ batch_size = masks.shape[0]
938
+
939
+ keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
940
+
941
+ if pred_iou_thresh > 0.0:
942
+ keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
943
+
944
+ # compute stability score
945
+ if stability_score_thresh > 0.0:
946
+ stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset)
947
+ keep_mask = keep_mask & (stability_scores > stability_score_thresh)
948
+
949
+ scores = iou_scores[keep_mask]
950
+ masks = masks[keep_mask]
951
+
952
+ # binarize masks
953
+ masks = masks > mask_threshold
954
+ converted_boxes = _batched_mask_to_box(masks)
955
+
956
+ keep_mask = ~_is_box_near_crop_edge(
957
+ converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
958
+ )
959
+
960
+ scores = scores[keep_mask]
961
+ masks = masks[keep_mask]
962
+ converted_boxes = converted_boxes[keep_mask]
963
+
964
+ masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
965
+ # conversion to rle is necessary to run non-maximum suppression
966
+ masks = _mask_to_rle_pytorch(masks)
967
+
968
+ return masks, scores, converted_boxes
969
+
970
+ def _filter_masks_tf(
971
+ self,
972
+ masks,
973
+ iou_scores,
974
+ original_size,
975
+ cropped_box_image,
976
+ pred_iou_thresh=0.88,
977
+ stability_score_thresh=0.95,
978
+ mask_threshold=0,
979
+ stability_score_offset=1,
980
+ ):
981
+ """
982
+ Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is
+ that the IoU scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability
+ scores need to be greater than `stability_score_thresh`. The method also converts the predicted masks to
+ bounding boxes and pads the predicted masks if necessary.
986
+
987
+ Args:
988
+ masks (`tf.Tensor`):
989
+ Input masks.
990
+ iou_scores (`tf.Tensor`):
991
+ List of IoU scores.
992
+ original_size (`Tuple[int,int]`):
993
+ Size of the original image.
994
+ cropped_box_image (`np.array`):
995
+ The cropped image.
996
+ pred_iou_thresh (`float`, *optional*, defaults to 0.88):
997
+ The threshold for the iou scores.
998
+ stability_score_thresh (`float`, *optional*, defaults to 0.95):
999
+ The threshold for the stability score.
1000
+ mask_threshold (`float`, *optional*, defaults to 0):
1001
+ The threshold for the predicted masks.
1002
+ stability_score_offset (`float`, *optional*, defaults to 1):
1003
+ The offset for the stability score used in the `_compute_stability_score` method.
1004
+
1005
+ """
1006
+ requires_backends(self, ["tf"])
1007
+ original_height, original_width = original_size
1008
+ iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]])
1009
+ masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]])
1010
+
1011
+ if masks.shape[0] != iou_scores.shape[0]:
1012
+ raise ValueError("masks and iou_scores must have the same batch size.")
1013
+
1014
+ batch_size = masks.shape[0]
1015
+
1016
+ keep_mask = tf.ones(batch_size, dtype=tf.bool)
1017
+
1018
+ if pred_iou_thresh > 0.0:
1019
+ keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
1020
+
1021
+ # compute stability score
1022
+ if stability_score_thresh > 0.0:
1023
+ stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset)
1024
+ keep_mask = keep_mask & (stability_scores > stability_score_thresh)
1025
+
1026
+ scores = iou_scores[keep_mask]
1027
+ masks = masks[keep_mask]
1028
+
1029
+ # binarize masks
1030
+ masks = masks > mask_threshold
1031
+ converted_boxes = _batched_mask_to_box_tf(masks)
1032
+
1033
+ keep_mask = ~_is_box_near_crop_edge_tf(
1034
+ converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
1035
+ )
1036
+
1037
+ scores = scores[keep_mask]
1038
+ masks = masks[keep_mask]
1039
+ converted_boxes = converted_boxes[keep_mask]
1040
+
1041
+ masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width)
1042
+ # conversion to rle is necessary to run non-maximum suppression
1043
+ masks = _mask_to_rle_tf(masks)
1044
+
1045
+ return masks, scores, converted_boxes
1046
+
1047
+
1048
+ def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
1049
+ # One mask is always contained inside the other.
1050
+ # Save memory by preventing an unnecessary cast to torch.int64
1051
+ intersections = (
1052
+ (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
1053
+ )
1054
+ unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
1055
+ stability_scores = intersections / unions
1056
+ return stability_scores
1057
+
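As a hedged illustration of the stability score computed above: with `mask_threshold=0.0` and `stability_score_offset=1`, it is the IoU between the mask binarized at +1 and the mask binarized at -1. On a toy tensor:

import torch

logits = torch.tensor([[[-3.0, 0.5, 2.0, 4.0]]])  # toy mask logits, shape (1, 1, 4)
score = _compute_stability_score_pt(logits, mask_threshold=0.0, stability_score_offset=1)
# 2 pixels exceed +1 (intersection) and 3 pixels exceed -1 (union), so score ≈ tensor([0.6667])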
1058
+
1059
+ def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int):
1060
+ # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure
1061
+ # we get the right division results.
1062
+ intersections = tf.count_nonzero(
1063
+ masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32
1064
+ )
1065
+ unions = tf.count_nonzero(masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32)
1066
+ stability_scores = intersections / unions
1067
+ return stability_scores
1068
+
1069
+
1070
+ def _build_point_grid(n_per_side: int) -> np.ndarray:
1071
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
1072
+ offset = 1 / (2 * n_per_side)
1073
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
1074
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
1075
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
1076
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
1077
+ return points
1078
+
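For example (illustration only), a 2 x 2 grid:

grid = _build_point_grid(2)
# array([[0.25, 0.25],
#        [0.75, 0.25],
#        [0.25, 0.75],
#        [0.75, 0.75]])  -- (x, y) pairs normalized to [0, 1]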
1079
+
1080
+ def _normalize_coordinates(
1081
+ target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False
1082
+ ) -> np.ndarray:
1083
+ """
1084
+ Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width)
1085
+ format.
1086
+ """
1087
+ old_height, old_width = original_size
1088
+
1089
+ scale = target_size * 1.0 / max(old_height, old_width)
1090
+ new_height, new_width = old_height * scale, old_width * scale
1091
+ new_width = int(new_width + 0.5)
1092
+ new_height = int(new_height + 0.5)
1093
+
1094
+ coords = deepcopy(coords).astype(float)
1095
+
1096
+ if is_bounding_box:
1097
+ coords = coords.reshape(-1, 2, 2)
1098
+
1099
+ coords[..., 0] = coords[..., 0] * (new_width / old_width)
1100
+ coords[..., 1] = coords[..., 1] * (new_height / old_height)
1101
+
1102
+ if is_bounding_box:
1103
+ coords = coords.reshape(-1, 4)
1104
+
1105
+ return coords
1106
+
1107
+
1108
+ def _generate_crop_boxes(
1109
+ image,
1110
+ target_size: int, # Is it tuple here?
1111
+ crop_n_layers: int = 0,
1112
+ overlap_ratio: float = 512 / 1500,
1113
+ points_per_crop: Optional[int] = 32,
1114
+ crop_n_points_downscale_factor: Optional[List[int]] = 1,
1115
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
1116
+ ) -> Tuple[List[List[int]], List[int]]:
1117
+ """
1118
+ Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
1119
+
1120
+ Args:
1121
+ image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
1122
+ Image to generate crops for.
1123
+ target_size (`int`):
1124
+ Size of the smallest crop.
1125
+ crop_n_layers (`int`, *optional*):
1126
+ If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers
1127
+ to run, where each layer has 2**i_layer number of image crops.
1128
+ overlap_ratio (`int`, *optional*):
1129
+ Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
1130
+ image length. Later layers with more crops scale down this overlap.
1131
+ points_per_crop (`int`, *optional*):
1132
+ Number of points to sample per crop.
1133
+ crop_n_points_downscale_factor (`int`, *optional*):
1134
+ The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
1135
+ input_data_format (`str` or `ChannelDimension`, *optional*):
1136
+ The channel dimension format of the input image. If not provided, it will be inferred.
1137
+ """
1138
+
1139
+ if isinstance(image, list):
1140
+ raise ValueError("Only one image is allowed for crop generation.")
1141
+ image = to_numpy_array(image)
1142
+ original_size = get_image_size(image, input_data_format)
1143
+
1144
+ points_grid = []
1145
+ for i in range(crop_n_layers + 1):
1146
+ n_points = int(points_per_crop / (crop_n_points_downscale_factor**i))
1147
+ points_grid.append(_build_point_grid(n_points))
1148
+
1149
+ crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)
1150
+
1151
+ cropped_images, point_grid_per_crop = _generate_crop_images(
1152
+ crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format
1153
+ )
1154
+ crop_boxes = np.array(crop_boxes)
1155
+ crop_boxes = crop_boxes.astype(np.float32)
1156
+ points_per_crop = np.array([point_grid_per_crop])
1157
+ points_per_crop = np.transpose(points_per_crop, axes=(0, 2, 1, 3))
1158
+
1159
+ input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64)
1160
+
1161
+ return crop_boxes, points_per_crop, cropped_images, input_labels
1162
+
1163
+
1164
+ def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
1165
+ """
1166
+ Generates 2**(i_layer + 1) crops per side for each of the `crop_n_layers` layers, plus one box covering the
+ full image. Crop boxes are in the (left, top, right, bottom) format:
+ - left: X coordinate of the top-left corner of the bounding box
+ - top: Y coordinate of the top-left corner of the bounding box
+ - right: X coordinate of the bottom-right corner of the bounding box
+ - bottom: Y coordinate of the bottom-right corner of the bounding box
1172
+ """
1173
+ crop_boxes, layer_idxs = [], []
1174
+ im_height, im_width = original_size
1175
+ short_side = min(im_height, im_width)
1176
+
1177
+ # Original image
1178
+ crop_boxes.append([0, 0, im_width, im_height])
1179
+ layer_idxs.append(0)
1180
+ for i_layer in range(crop_n_layers):
1181
+ n_crops_per_side = 2 ** (i_layer + 1)
1182
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
1183
+
1184
+ crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
1185
+ crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
1186
+
1187
+ crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
1188
+ crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
1189
+
1190
+ for left, top in product(crop_box_x0, crop_box_y0):
1191
+ box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
1192
+ crop_boxes.append(box)
1193
+ layer_idxs.append(i_layer + 1)
1194
+
1195
+ return crop_boxes, layer_idxs
1196
+
1197
+
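As a sanity check on the crop layout, the following standalone sketch reproduces the arithmetic of the first crop layer (i_layer = 0 in the loop above) for a hypothetical 256x256 image with the default overlap ratio; it mirrors the loop rather than calling the helper:

    import math
    from itertools import product

    im_height, im_width = 256, 256          # hypothetical image size
    overlap_ratio = 512 / 1500
    short_side = min(im_height, im_width)

    n_crops_per_side = 2 ** 1               # first crop layer -> 2 crops per side, 4 crops in total
    overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
    crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
    crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))

    x0s = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
    y0s = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
    boxes = [[x, y, min(x + crop_width, im_width), min(y + crop_height, im_height)] for x, y in product(x0s, y0s)]
    print(boxes)  # 4 overlapping XYXY boxes that together cover the image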
1198
+ def _generate_crop_images(
1199
+ crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
1200
+ ):
1201
+ """
1202
+ Takes as input the bounding boxes that are used to crop the image. Based on the crops, the corresponding points are
1203
+ also normalized and returned.
1204
+ """
1205
+ cropped_images = []
1206
+ total_points_per_crop = []
1207
+ for i, crop_box in enumerate(crop_boxes):
1208
+ left, top, right, bottom = crop_box
1209
+
1210
+ channel_dim = infer_channel_dimension_format(image, input_data_format)
1211
+ if channel_dim == ChannelDimension.LAST:
1212
+ cropped_im = image[top:bottom, left:right, :]
1213
+ else:
1214
+ cropped_im = image[:, top:bottom, left:right]
1215
+
1216
+ cropped_images.append(cropped_im)
1217
+
1218
+ cropped_im_size = get_image_size(cropped_im, channel_dim)
1219
+ points_scale = np.array(cropped_im_size)[None, ::-1]
1220
+
1221
+ points = points_grid[layer_idxs[i]] * points_scale
1222
+ normalized_points = _normalize_coordinates(target_size, points, original_size)
1223
+ total_points_per_crop.append(normalized_points)
1224
+
1225
+ return cropped_images, total_points_per_crop
1226
+
1227
+
1228
+ def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int):
1229
+ left, top, right, bottom = crop_box
1230
+ if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
1231
+ return masks
1232
+ # Coordinate transform masks
1233
+ pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
1234
+ pad = (left, pad_x - left, top, pad_y - top)
1235
+ return torch.nn.functional.pad(masks, pad, value=0)
1236
+
1237
+
1238
+ def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int):
1239
+ left, top, right, bottom = crop_box
1240
+ if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
1241
+ return masks
1242
+ # Coordinate transform masks
1243
+ pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
1244
+ pad = [[0, 0]] * (len(masks.shape) - 2) + [[top, pad_y - top], [left, pad_x - left]]
1245
+ return tf.pad(masks, pad, constant_values=0)
1246
+
1247
+
1248
+ def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
1249
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
1250
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
1251
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
1252
+
1253
+ left, top, _, _ = crop_box
1254
+ offset = torch.tensor([[left, top, left, top]], device=boxes.device)
1255
+ # Check if boxes has a channel dimension
1256
+ if len(boxes.shape) == 3:
1257
+ offset = offset.unsqueeze(1)
1258
+ boxes = (boxes + offset).float()
1259
+
1260
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
1261
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
1262
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
1263
+ return torch.any(near_crop_edge, dim=1)
1264
+
1265
+
1266
+ def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0):
1267
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
1268
+ crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32)
1269
+ orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32)
1270
+
1271
+ left, top, _, _ = crop_box
1272
+ offset = tf.convert_to_tensor([[left, top, left, top]])
1273
+ # Check if boxes has a channel dimension
1274
+ if len(boxes.shape) == 3:
1275
+ offset = tf.expand_dims(offset, 1)
1276
+ boxes = tf.cast(boxes + offset, tf.float32)
1277
+
1278
+ near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0)
1279
+ near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0)
1280
+ near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge)
1281
+ return tf.reduce_any(near_crop_edge, axis=1)
1282
+
1283
+
1284
+ def _batched_mask_to_box(masks: "torch.Tensor"):
1285
+ """
1286
+ Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
1287
+ corresponds to the following required indices:
1288
+ - LEFT: left hand side of the bounding box
1289
+ - TOP: top of the bounding box
1290
+ - RIGHT: right of the bounding box
1291
+ - BOTTOM: bottom of the bounding box
1292
+
1293
+ Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
1294
+ is channel_1 x channel_2 x ... x 4.
1295
+
1296
+ Args:
1297
+ - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
1298
+ """
1299
+ # torch.max below raises an error on empty inputs, just skip in this case
1300
+
1301
+ if torch.numel(masks) == 0:
1302
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
1303
+
1304
+ # Normalize shape to Cxheightxwidth
1305
+ shape = masks.shape
1306
+ height, width = shape[-2:]
1307
+
1308
+ # Get top and bottom edges
1309
+ in_height, _ = torch.max(masks, dim=-1)
1310
+ in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
1311
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
1312
+ in_height_coords = in_height_coords + height * (~in_height)
1313
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
1314
+
1315
+ # Get left and right edges
1316
+ in_width, _ = torch.max(masks, dim=-2)
1317
+ in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
1318
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
1319
+ in_width_coords = in_width_coords + width * (~in_width)
1320
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
1321
+
1322
+ # If the mask is empty the right edge will be to the left of the left edge.
1323
+ # Replace these boxes with [0, 0, 0, 0]
1324
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
1325
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
1326
+ out = out * (~empty_filter).unsqueeze(-1)
1327
+
1328
+ # Return to original shape
1329
+ out = out.reshape(*shape[:-2], 4)
1330
+ return out
1331
+
1332
+
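A minimal sketch of the edge-extraction trick above on a toy boolean mask (it re-implements the same steps for a single blob so the resulting XYXY box is easy to verify by eye; assumes boolean masks as in the function above):

    import torch

    mask = torch.zeros(1, 1, 6, 8, dtype=torch.bool)
    mask[0, 0, 2:5, 3:7] = True                      # a 3x4 blob with top-left corner at (y=2, x=3)

    height, width = mask.shape[-2:]
    in_height, _ = torch.max(mask, dim=-1)
    in_height_coords = in_height * torch.arange(height)[None, :]
    bottom = torch.max(in_height_coords, dim=-1).values
    top = torch.min(in_height_coords + height * (~in_height), dim=-1).values

    in_width, _ = torch.max(mask, dim=-2)
    in_width_coords = in_width * torch.arange(width)[None, :]
    right = torch.max(in_width_coords, dim=-1).values
    left = torch.min(in_width_coords + width * (~in_width), dim=-1).values

    print(torch.stack([left, top, right, bottom], dim=-1))  # tensor([[[3, 2, 6, 4]]]) in XYXY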
1333
+ def _batched_mask_to_box_tf(masks: "tf.Tensor"):
1334
+ """
1335
+ Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
1336
+ corresponds to the following required indices:
1337
+ - LEFT: left hand side of the bounding box
1338
+ - TOP: top of the bounding box
1339
+ - RIGHT: right of the bounding box
1340
+ - BOTTOM: bottom of the bounding box
1341
+
1342
+ Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
1343
+ is channel_1 x channel_2 x ... x 4.
1344
+
1345
+ Args:
1346
+ - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
1347
+ """
1348
+
1349
+ if tf.size(masks) == 0:
1350
+ return tf.zeros([*masks.shape[:-2], 4])
1351
+
1352
+ # Normalize shape to Cxheightxwidth
1353
+ shape = shape_list(masks)
1354
+ height, width = shape[-2:]
1355
+
1356
+ # Get top and bottom edges
1357
+ in_height = tf.reduce_max(masks, axis=-1)
1358
+ in_height_coords = in_height * tf.range(height)[None, :]
1359
+ bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
1360
+ in_height_coords = in_height_coords + height * (~in_height)
1361
+ top_edges = tf.reduce_min(in_height_coords, axis=-1)
1362
+
1363
+ # Get left and right edges
1364
+ in_width = tf.reduce_max(masks, axis=-2)
1365
+ in_width_coords = in_width * tf.range(width)[None, :]
1366
+ right_edges = tf.reduce_max(in_width_coords, axis=-1)
1367
+ in_width_coords = in_width_coords + width * (~in_width)
1368
+ left_edges = tf.reduce_min(in_width_coords, axis=-1)
1369
+
1370
+ # If the mask is empty the right edge will be to the left of the left edge.
1371
+ # Replace these boxes with [0, 0, 0, 0]
1372
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
1373
+ out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
1374
+ out = out * tf.expand_dims(~empty_filter, -1)
1375
+
1376
+ # Return to original shape
1377
+ out = tf.reshape(out, [*shape[:-2], 4])
1378
+ return out
1379
+
1380
+
1381
+ def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
1382
+ """
1383
+ Encodes masks to run-length encoding (RLE), in the format expected by pycocotools.
1384
+ """
1385
+ # Put in fortran order and flatten height and width
1386
+ batch_size, height, width = input_mask.shape
1387
+ input_mask = input_mask.permute(0, 2, 1).flatten(1)
1388
+
1389
+ # Compute change indices
1390
+ diff = input_mask[:, 1:] ^ input_mask[:, :-1]
1391
+ change_indices = diff.nonzero()
1392
+
1393
+ # Encode run length
1394
+ out = []
1395
+ for i in range(batch_size):
1396
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
1397
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
1398
+ counts = [] if input_mask[i, 0] == 0 else [0]
1399
+ counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
1400
+ out.append({"size": [height, width], "counts": counts})
1401
+ return out
1402
+
1403
+
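For intuition, here is a hypothetical 4x4 mask and the uncompressed RLE it would produce under the scheme above (counts alternate between runs of 0s and 1s, taken in column-major order):

    import torch

    # One 1x4x4 mask whose second and third columns are fully set.
    mask = torch.zeros(1, 4, 4, dtype=torch.bool)
    mask[0, :, 1:3] = True

    # Column-major (Fortran) flattening, as done above: columns are concatenated top to bottom.
    flat = mask.permute(0, 2, 1).flatten(1)
    print(flat.int().tolist())  # [[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]
    # Expected uncompressed RLE: {"size": [4, 4], "counts": [4, 8, 4]}  (4 zeros, 8 ones, 4 zeros)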
1404
+ def _mask_to_rle_tf(input_mask: "tf.Tensor"):
1405
+ """
1406
+ Encodes masks to run-length encoding (RLE), in the format expected by pycocotools.
1407
+ """
1408
+ # Put in fortran order and flatten height and width
1409
+ batch_size, height, width = input_mask.shape
1410
+ input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1)
1411
+
1412
+ # Compute change indices
1413
+ diff = input_mask[:, 1:] ^ input_mask[:, :-1]
1414
+ change_indices = tf.where(diff)
1415
+
1416
+ # Encode run length
1417
+ out = []
1418
+ for i in range(batch_size):
1419
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
1420
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
1421
+ counts = [] if input_mask[i, 0] == 0 else [0]
1422
+ counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]]
1423
+ out.append({"size": [height, width], "counts": counts})
1424
+ return out
1425
+
1426
+
1427
+ def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
1428
+ """Compute a binary mask from an uncompressed RLE."""
1429
+ height, width = rle["size"]
1430
+ mask = np.empty(height * width, dtype=bool)
1431
+ idx = 0
1432
+ parity = False
1433
+ for count in rle["counts"]:
1434
+ mask[idx : idx + count] = parity
1435
+ idx += count
1436
+ parity = not parity
1437
+ mask = mask.reshape(width, height)
1438
+ return mask.transpose() # Reshape to original shape
1439
+
1440
+
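A quick round-trip check of the decoder logic above, using the toy RLE from the previous sketch (plain NumPy, no transformers import needed):

    import numpy as np

    rle = {"size": [4, 4], "counts": [4, 8, 4]}      # toy example from the sketch above
    height, width = rle["size"]
    mask = np.empty(height * width, dtype=bool)
    idx, parity = 0, False
    for count in rle["counts"]:                      # alternate runs of False and True
        mask[idx : idx + count] = parity
        idx += count
        parity = not parity
    mask = mask.reshape(width, height).transpose()   # back from column-major order
    print(mask.astype(int))                          # columns 1 and 2 are set, matching the original mask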
1441
+ def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
1442
+ """
1443
+ Perform NMS (Non Maximum Suppression) on the outputs.
1444
+
1445
+ Args:
1446
+ rle_masks (`torch.Tensor`):
1447
+ binary masks in the RLE format
1448
+ iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
1449
+ iou_scores predicted by the model
1450
+ mask_boxes (`torch.Tensor`):
1451
+ The bounding boxes corresponding to segmentation masks
1452
+ amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
1453
+ NMS threshold.
1454
+ """
1455
+ keep_by_nms = batched_nms(
1456
+ boxes=mask_boxes.float(),
1457
+ scores=iou_scores,
1458
+ idxs=torch.zeros(mask_boxes.shape[0]),
1459
+ iou_threshold=amg_crops_nms_thresh,
1460
+ )
1461
+
1462
+ iou_scores = iou_scores[keep_by_nms]
1463
+ rle_masks = [rle_masks[i] for i in keep_by_nms]
1464
+ mask_boxes = mask_boxes[keep_by_nms]
1465
+ masks = [_rle_to_mask(rle) for rle in rle_masks]
1466
+
1467
+ return masks, iou_scores, rle_masks, mask_boxes
1468
+
1469
+
1470
+ def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
1471
+ """
1472
+ Perform NMS (Non Maximum Suppression) on the outputs.
1473
+
1474
+ Args:
1475
+ rle_masks (`tf.Tensor`):
1476
+ binary masks in the RLE format
1477
+ iou_scores (`tf.Tensor` of shape (nb_masks, 1)):
1478
+ iou_scores predicted by the model
1479
+ mask_boxes (`tf.Tensor`):
1480
+ The bounding boxes corresponding to segmentation masks
1481
+ amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
1482
+ NMS threshold.
1483
+ """
1484
+ keep_by_nms = tf.image.combined_non_max_suppression(
1485
+ boxes=tf.cast(mask_boxes, tf.float32),
1486
+ scores=iou_scores,
1487
+ idxs=tf.zeros(mask_boxes.shape[0]),
1488
+ iou_threshold=amg_crops_nms_thresh,
1489
+ )
1490
+
1491
+ iou_scores = iou_scores[keep_by_nms]
1492
+ rle_masks = [rle_masks[i] for i in keep_by_nms]
1493
+ mask_boxes = mask_boxes[keep_by_nms]
1494
+ masks = [_rle_to_mask(rle) for rle in rle_masks]
1495
+
1496
+ return masks, iou_scores, rle_masks, mask_boxes
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/modeling_sam.py ADDED
@@ -0,0 +1,1415 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SAM model."""
16
+
17
+ import collections
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from torch import Tensor, nn
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import BaseModelOutput
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CONFIG_FOR_DOC = "SamConfig"
38
+ _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge"
39
+
40
+
41
+ from ..deprecated._archive_maps import SAM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
42
+
43
+
44
+ @dataclass
45
+ class SamVisionEncoderOutput(ModelOutput):
46
+ """
47
+ Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection
48
+ layer to the pooler_output.
49
+
50
+ Args:
51
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
52
+ The image embeddings obtained by applying the projection layer to the pooler_output.
53
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
54
+ Sequence of hidden-states at the output of the last layer of the model.
55
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
56
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
57
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
58
+
59
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
60
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
61
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
62
+ sequence_length)`.
63
+
64
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
65
+ heads.
66
+ """
67
+
68
+ image_embeds: Optional[torch.FloatTensor] = None
69
+ last_hidden_state: torch.FloatTensor = None
70
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
71
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
72
+
73
+
74
+ @dataclass
75
+ class SamImageSegmentationOutput(ModelOutput):
76
+ """
77
+ Base class for Segment-Anything model's output
78
+
79
+ Args:
80
+ iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`):
81
+ The iou scores of the predicted masks.
82
+ pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
83
+ The predicted low-resolution masks. These need to be post-processed by the processor.
84
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
85
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
86
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
87
+
88
+ Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
89
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
90
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
91
+ sequence_length)`.
92
+
93
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
94
+ heads.
95
+ mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
96
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
97
+ sequence_length)`.
98
+
99
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
100
+ heads.
101
+ """
102
+
103
+ iou_scores: torch.FloatTensor = None
104
+ pred_masks: torch.FloatTensor = None
105
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
106
+ vision_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
107
+ mask_decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
108
+
109
+
110
+ class SamPatchEmbeddings(nn.Module):
111
+ """
112
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
113
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
114
+ Transformer.
115
+ """
116
+
117
+ def __init__(self, config):
118
+ super().__init__()
119
+ image_size, patch_size = config.image_size, config.patch_size
120
+ num_channels, hidden_size = config.num_channels, config.hidden_size
121
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
122
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
123
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
124
+ self.image_size = image_size
125
+ self.patch_size = patch_size
126
+ self.num_channels = num_channels
127
+ self.num_patches = num_patches
128
+
129
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
130
+
131
+ def forward(self, pixel_values):
132
+ batch_size, num_channels, height, width = pixel_values.shape
133
+ if num_channels != self.num_channels:
134
+ raise ValueError(
135
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
136
+ )
137
+ if height != self.image_size[0] or width != self.image_size[1]:
138
+ raise ValueError(
139
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
140
+ )
141
+ embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
142
+ return embeddings
143
+
144
+
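As a shape sanity check, a rough sketch of what the projection above does for ViT-B-like settings (1024x1024 images, 16x16 patches, hidden size 768 are assumed values; a plain Conv2d is used instead of the class itself):

    import torch
    from torch import nn

    image_size, patch_size, num_channels, hidden_size = 1024, 16, 3, 768   # assumed ViT-B-like values
    projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    pixel_values = torch.randn(1, num_channels, image_size, image_size)
    embeddings = projection(pixel_values).permute(0, 2, 3, 1)              # channels-last, as in the class above
    print(embeddings.shape)                                                 # torch.Size([1, 64, 64, 768])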
145
+ class SamMLPBlock(nn.Module):
146
+ def __init__(self, config):
147
+ super().__init__()
148
+ self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim)
149
+ self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size)
150
+ self.act = ACT2FN[config.hidden_act]
151
+
152
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
153
+ hidden_states = self.lin1(hidden_states)
154
+ hidden_states = self.act(hidden_states)
155
+ hidden_states = self.lin2(hidden_states)
156
+ return hidden_states
157
+
158
+
159
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->Sam
160
+ class SamLayerNorm(nn.Module):
161
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
162
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
163
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
164
+ """
165
+
166
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
167
+ super().__init__()
168
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
169
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
170
+ self.eps = eps
171
+ self.data_format = data_format
172
+ if self.data_format not in ["channels_last", "channels_first"]:
173
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
174
+ self.normalized_shape = (normalized_shape,)
175
+
176
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
177
+ if self.data_format == "channels_last":
178
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
179
+ elif self.data_format == "channels_first":
180
+ input_dtype = x.dtype
181
+ x = x.float()
182
+ u = x.mean(1, keepdim=True)
183
+ s = (x - u).pow(2).mean(1, keepdim=True)
184
+ x = (x - u) / torch.sqrt(s + self.eps)
185
+ x = x.to(dtype=input_dtype)
186
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
187
+ return x
188
+
189
+
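A small sketch of why the channels_first branch above matches a standard LayerNorm applied over the channel dimension (assuming unit weight and zero bias, so only the normalization itself is compared):

    import torch
    from torch import nn

    x = torch.randn(2, 8, 4, 4)                       # (batch, channels, height, width)
    eps = 1e-6

    # Manual normalization over the channel dim, as in the channels_first branch above.
    u = x.mean(1, keepdim=True)
    s = (x - u).pow(2).mean(1, keepdim=True)
    manual = (x - u) / torch.sqrt(s + eps)

    # Equivalent: move channels last, apply a plain LayerNorm, move them back.
    reference = nn.functional.layer_norm(x.permute(0, 2, 3, 1), (8,), eps=eps).permute(0, 3, 1, 2)
    print(torch.allclose(manual, reference, atol=1e-5))   # True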
190
+ class SamAttention(nn.Module):
191
+ """
192
+ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
193
+ values.
194
+ """
195
+
196
+ def __init__(self, config, downsample_rate=None):
197
+ super().__init__()
198
+ self.hidden_size = config.hidden_size
199
+
200
+ downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
201
+
202
+ self.internal_dim = config.hidden_size // downsample_rate
203
+ self.num_attention_heads = config.num_attention_heads
204
+ if self.internal_dim % config.num_attention_heads != 0:
205
+ raise ValueError("num_attention_heads must divide hidden_size.")
206
+
207
+ self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
208
+ self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
209
+ self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
210
+ self.out_proj = nn.Linear(self.internal_dim, self.hidden_size)
211
+
212
+ def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor:
213
+ batch, point_batch_size, n_tokens, channel = hidden_states.shape
214
+ c_per_head = channel // num_attention_heads
215
+ hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
216
+ return hidden_states.transpose(1, 2)
217
+
218
+ def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor:
219
+ batch, n_heads, n_tokens, c_per_head = hidden_states.shape
220
+ hidden_states = hidden_states.transpose(1, 2)
221
+ return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head)
222
+
223
+ def forward(self, query: Tensor, key: Tensor, value: Tensor, attention_similarity: Tensor = None) -> Tensor:
224
+ # Input projections
225
+ query = self.q_proj(query)
226
+ key = self.k_proj(key)
227
+ value = self.v_proj(value)
228
+
229
+ point_batch_size = query.shape[1]
230
+ # Separate into heads
231
+ query = self._separate_heads(query, self.num_attention_heads)
232
+ key = self._separate_heads(key, self.num_attention_heads)
233
+ value = self._separate_heads(value, self.num_attention_heads)
234
+
235
+ # SamAttention
236
+ _, _, _, c_per_head = query.shape
237
+ attn = query @ key.permute(0, 1, 3, 2) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens
238
+ attn = attn / math.sqrt(c_per_head)
239
+ attn = torch.softmax(attn, dim=-1)
240
+
241
+ if attention_similarity is not None:
242
+ attn = attn + attention_similarity
243
+ attn = torch.softmax(attn, dim=-1)
244
+
245
+ # Get output
246
+ out = attn @ value
247
+ out = self._recombine_heads(out, point_batch_size)
248
+ out = self.out_proj(out)
249
+
250
+ return out
251
+
252
+
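The extra `point_batch_size` axis is the main difference from a standard attention block; the sketch below shows how the `_separate_heads` reshape folds it into the batch dimension and how `_recombine_heads` undoes it (shapes are illustrative, not tied to a real checkpoint):

    import torch

    batch, point_batch_size, n_tokens, channel, num_heads = 2, 3, 5, 256, 8
    hidden_states = torch.randn(batch, point_batch_size, n_tokens, channel)

    # Fold the point batch into the batch axis and split channels across heads.
    separated = hidden_states.reshape(
        batch * point_batch_size, n_tokens, num_heads, channel // num_heads
    ).transpose(1, 2)
    print(separated.shape)        # torch.Size([6, 8, 5, 32])

    # Undo the transpose and recover the (batch, point_batch_size, ...) layout.
    recombined = separated.transpose(1, 2).reshape(batch, point_batch_size, n_tokens, channel)
    print(torch.equal(hidden_states, recombined))   # True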
253
+ class SamTwoWayAttentionBlock(nn.Module):
254
+ def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False):
255
+ """
256
+ A transformer block with four layers:
257
+ (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
258
+ sparse inputs (4) cross attention of dense inputs -> sparse inputs
259
+
260
+ Arguments:
261
+ config (`SamMaskDecoderConfig`):
262
+ The configuration file used to instantiate the block
263
+ attention_downsample_rate (`int`, *optional*, defaults to 2):
264
+ The downsample ratio of the block used to reduce the inner dim of the attention.
265
+ skip_first_layer_pe (`bool`, *optional*, defaults to `False`):
266
+ Whether or not to skip the addition of the query_point_embedding on the first layer.
267
+ """
268
+ super().__init__()
269
+
270
+ self.hidden_size = config.hidden_size
271
+ self.layer_norm_eps = config.layer_norm_eps
272
+
273
+ self.self_attn = SamAttention(config, downsample_rate=1)
274
+ self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
275
+
276
+ self.cross_attn_token_to_image = SamAttention(config, downsample_rate=attention_downsample_rate)
277
+ self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
278
+
279
+ self.mlp = SamMLPBlock(config)
280
+ self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
281
+
282
+ self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps)
283
+ self.cross_attn_image_to_token = SamAttention(config, downsample_rate=attention_downsample_rate)
284
+
285
+ self.skip_first_layer_pe = skip_first_layer_pe
286
+
287
+ def forward(
288
+ self,
289
+ queries: Tensor,
290
+ keys: Tensor,
291
+ query_point_embedding: Tensor,
292
+ key_point_embedding: Tensor,
293
+ attention_similarity: Tensor,
294
+ output_attentions: bool = False,
295
+ ):
296
+ # Self attention block
297
+ if self.skip_first_layer_pe:
298
+ queries = self.self_attn(query=queries, key=queries, value=queries)
299
+ else:
300
+ query = queries + query_point_embedding
301
+ attn_out = self.self_attn(query=query, key=query, value=queries)
302
+ queries = queries + attn_out
303
+ queries = self.layer_norm1(queries)
304
+
305
+ # Cross attention block, tokens attending to image embedding
306
+ query = queries + query_point_embedding
307
+ key = keys + key_point_embedding
308
+
309
+ attn_out = self.cross_attn_token_to_image(
310
+ query=query, key=key, value=keys, attention_similarity=attention_similarity
311
+ )
312
+ queries = queries + attn_out
313
+
314
+ queries = self.layer_norm2(queries)
315
+
316
+ # MLP block
317
+ mlp_out = self.mlp(queries)
318
+ queries = queries + mlp_out
319
+ queries = self.layer_norm3(queries)
320
+
321
+ # Cross attention block, image embedding attending to tokens
322
+ query = queries + query_point_embedding
323
+ key = keys + key_point_embedding
324
+
325
+ attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
326
+ keys = keys + attn_out
327
+
328
+ keys = self.layer_norm4(keys)
329
+
330
+ outputs = (queries, keys)
331
+
332
+ if output_attentions:
333
+ outputs = outputs + (attn_out,)
334
+ else:
335
+ outputs = outputs + (None,)
336
+
337
+ return outputs
338
+
339
+
340
+ class SamTwoWayTransformer(nn.Module):
341
+ def __init__(self, config: SamMaskDecoderConfig):
342
+ super().__init__()
343
+ self.config = config
344
+
345
+ self.num_hidden_layers = config.num_hidden_layers
346
+ self.layers = nn.ModuleList()
347
+
348
+ for i in range(self.num_hidden_layers):
349
+ self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
350
+
351
+ self.final_attn_token_to_image = SamAttention(config)
352
+ self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
353
+
354
+ def forward(
355
+ self,
356
+ point_embeddings: Tensor,
357
+ image_embeddings: Tensor,
358
+ image_positional_embeddings: Tensor,
359
+ attention_similarity: Tensor,
360
+ target_embedding=None,
361
+ output_attentions: Optional[bool] = None,
362
+ output_hidden_states: Optional[bool] = None,
363
+ return_dict: Optional[bool] = None,
364
+ ) -> Union[Tuple, BaseModelOutput]:
365
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
366
+ output_hidden_states = (
367
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
368
+ )
369
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
370
+
371
+ all_attentions = ()
372
+
373
+ if image_embeddings is None:
374
+ raise ValueError("You have to specify an image_embedding")
375
+
376
+ image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
377
+ image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
378
+
379
+ # Prepare queries
380
+ queries = point_embeddings
381
+ keys = image_embeddings
382
+
383
+ # Apply transformer blocks and final layernorm
384
+ for layer in self.layers:
385
+ if target_embedding is not None:
386
+ queries += target_embedding
387
+
388
+ queries, keys, attention_outputs = layer(
389
+ queries=queries,
390
+ keys=keys,
391
+ query_point_embedding=point_embeddings,
392
+ key_point_embedding=image_positional_embeddings,
393
+ attention_similarity=attention_similarity,
394
+ output_attentions=output_attentions,
395
+ )
396
+
397
+ if output_attentions:
398
+ all_attentions = all_attentions + (attention_outputs,)
399
+
400
+ # Apply the final attention layer from the points to the image
401
+ query = queries + point_embeddings
402
+ key = keys + image_positional_embeddings
403
+
404
+ attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)
405
+
406
+ queries = queries + attn_out
407
+ queries = self.layer_norm_final_attn(queries)
408
+ return queries, keys, all_attentions
409
+
410
+
411
+ class SamFeedForward(nn.Module):
412
+ def __init__(
413
+ self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False
414
+ ):
415
+ super().__init__()
416
+ self.num_layers = num_layers
417
+ self.activation = nn.ReLU()
418
+ self.proj_in = nn.Linear(input_dim, hidden_dim)
419
+ self.proj_out = nn.Linear(hidden_dim, output_dim)
420
+ self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
421
+ self.sigmoid_output = sigmoid_output
422
+
423
+ def forward(self, hidden_states):
424
+ hidden_states = self.proj_in(hidden_states)
425
+ hidden_states = self.activation(hidden_states)
426
+ for layer in self.layers:
427
+ hidden_states = self.activation(layer(hidden_states))
428
+
429
+ hidden_states = self.proj_out(hidden_states)
430
+ if self.sigmoid_output:
431
+ hidden_states = F.sigmoid(hidden_states)
432
+ return hidden_states
433
+
434
+
435
+ class SamMaskDecoder(nn.Module):
436
+ def __init__(self, config: SamMaskDecoderConfig):
437
+ super().__init__()
438
+
439
+ self.hidden_size = config.hidden_size
440
+
441
+ self.num_multimask_outputs = config.num_multimask_outputs
442
+ self.num_mask_tokens = config.num_multimask_outputs + 1
443
+
444
+ self.iou_token = nn.Embedding(1, self.hidden_size)
445
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)
446
+
447
+ self.transformer = SamTwoWayTransformer(config)
448
+
449
+ # should we create a new class for this?
450
+ self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
451
+ self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
452
+ self.upscale_layer_norm = SamLayerNorm(self.hidden_size // 4, data_format="channels_first")
453
+ self.activation = nn.GELU()
454
+
455
+ mlps_list = []
456
+ for _ in range(self.num_mask_tokens):
457
+ mlps_list += [SamFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
458
+ self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)
459
+
460
+ self.iou_prediction_head = SamFeedForward(
461
+ self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth
462
+ )
463
+
464
+ def forward(
465
+ self,
466
+ image_embeddings: torch.Tensor,
467
+ image_positional_embeddings: torch.Tensor,
468
+ sparse_prompt_embeddings: torch.Tensor,
469
+ dense_prompt_embeddings: torch.Tensor,
470
+ multimask_output: bool,
471
+ output_attentions: Optional[bool] = None,
472
+ attention_similarity: torch.Tensor = None,
473
+ target_embedding: torch.Tensor = None,
474
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
475
+ """
476
+ Predict masks given image and prompt embeddings.
477
+
478
+ Args:
479
+ image_embeddings (`torch.Tensor`):
480
+ the embeddings from the image encoder
481
+ image_positional_embedding (`torch.Tensor`):
482
+ positional encoding with the shape of image_embeddings
483
+ sparse_prompt_embeddings (`torch.Tensor`):
484
+ The embeddings of the points and boxes
485
+ dense_prompt_embeddings (`torch.Tensor`):
486
+ the embeddings of the mask inputs
487
+ multimask_output (bool):
488
+ Whether to return multiple masks or a single mask.
489
+ output_attentions (bool, *optional*):
490
+ Whether or not to return the attentions tensors of all attention layers.
491
+ """
492
+ batch_size, num_channels, height, width = image_embeddings.shape
493
+ point_batch_size = sparse_prompt_embeddings.shape[1]
494
+ # Concatenate output tokens
495
+ output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
496
+ output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
497
+
498
+ if sparse_prompt_embeddings.sum().item() != 0:
499
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
500
+ else:
501
+ tokens = output_tokens
502
+ point_embeddings = tokens.to(self.iou_token.weight.dtype)
503
+
504
+ # Expand per-image data in batch direction to be per-point
505
+ image_embeddings = image_embeddings + dense_prompt_embeddings
506
+ image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)
507
+ image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
508
+
509
+ # Run the transformer, image_positional_embedding are consumed
510
+ point_embedding, image_embeddings, attentions = self.transformer(
511
+ point_embeddings=point_embeddings,
512
+ image_embeddings=image_embeddings,
513
+ image_positional_embeddings=image_positional_embeddings,
514
+ attention_similarity=attention_similarity,
515
+ target_embedding=target_embedding,
516
+ output_attentions=output_attentions,
517
+ )
518
+ iou_token_out = point_embedding[:, :, 0, :]
519
+ mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]
520
+
521
+ # Upscale mask embeddings and predict masks using the mask tokens
522
+ image_embeddings = image_embeddings.transpose(2, 3).reshape(
523
+ batch_size * point_batch_size, num_channels, height, width
524
+ )
525
+
526
+ upscaled_embedding = self.upscale_conv1(image_embeddings)
527
+ upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
528
+ upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))
529
+
530
+ hyper_in_list = []
531
+ for i in range(self.num_mask_tokens):
532
+ current_mlp = self.output_hypernetworks_mlps[i]
533
+ hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
534
+ hyper_in = torch.stack(hyper_in_list, dim=2)
535
+
536
+ _, num_channels, height, width = upscaled_embedding.shape
537
+ upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)
538
+ masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width)
539
+
540
+ # Generate mask quality predictions
541
+ iou_pred = self.iou_prediction_head(iou_token_out)
542
+
543
+ # Select the correct mask or masks for output
544
+ if multimask_output:
545
+ mask_slice = slice(1, None)
546
+ else:
547
+ mask_slice = slice(0, 1)
548
+ masks = masks[:, :, mask_slice, :, :]
549
+ iou_pred = iou_pred[:, :, mask_slice]
550
+
551
+ outputs = (masks, iou_pred)
552
+
553
+ if output_attentions:
554
+ outputs = outputs + (attentions,)
555
+ else:
556
+ outputs = outputs + (None,)
557
+
558
+ return outputs
559
+
560
+
561
+ class SamPositionalEmbedding(nn.Module):
562
+ def __init__(self, config):
563
+ super().__init__()
564
+ self.scale = config.hidden_size // 2
565
+ self.register_buffer("positional_embedding", self.scale * torch.randn((2, config.num_pos_feats)))
566
+
567
+ def forward(self, input_coords, input_shape=None):
568
+ """Positionally encode points that are normalized to [0,1]."""
569
+ coordinates = input_coords.clone()
570
+
571
+ if input_shape is not None:
572
+ coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
573
+ coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
574
+
575
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
576
+ coordinates = 2 * coordinates - 1
577
+ coordinates = coordinates.to(self.positional_embedding.dtype)
578
+ coordinates = coordinates @ self.positional_embedding
579
+ coordinates = 2 * np.pi * coordinates
580
+ # outputs d_1 x ... x d_n x channel shape
581
+ return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
582
+
583
+
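The buffer above implements random Fourier features; the following standalone sketch shows the same mapping for a couple of normalized points (the Gaussian matrix is freshly sampled here, so only the shapes, not the values, match a real checkpoint; the sizes are assumed for illustration):

    import numpy as np

    num_pos_feats, scale = 128, 64                       # assumed values for illustration
    positional_embedding = scale * np.random.randn(2, num_pos_feats)

    coords = np.array([[0.25, 0.75], [0.5, 0.5]])        # points normalized to [0, 1]
    coords = 2 * coords - 1                              # map to [-1, 1]
    projected = 2 * np.pi * (coords @ positional_embedding)
    encoding = np.concatenate([np.sin(projected), np.cos(projected)], axis=-1)
    print(encoding.shape)                                # (2, 256): sin and cos halves concatenated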
584
+ class SamMaskEmbedding(nn.Module):
585
+ def __init__(self, config: SamPromptEncoderConfig):
586
+ super().__init__()
587
+ self.mask_input_channels = config.mask_input_channels // 4
588
+ self.activation = ACT2FN[config.hidden_act]
589
+ self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2)
590
+ self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2)
591
+ self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1)
592
+ self.layer_norm1 = SamLayerNorm(
593
+ self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first"
594
+ )
595
+ self.layer_norm2 = SamLayerNorm(
596
+ self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first"
597
+ )
598
+
599
+ def forward(self, masks):
600
+ hidden_states = self.conv1(masks)
601
+ hidden_states = self.layer_norm1(hidden_states)
602
+ hidden_states = self.activation(hidden_states)
603
+
604
+ hidden_states = self.conv2(hidden_states)
605
+ hidden_states = self.layer_norm2(hidden_states)
606
+ hidden_states = self.activation(hidden_states)
607
+ dense_embeddings = self.conv3(hidden_states)
608
+ return dense_embeddings
609
+
610
+
611
+ class SamPromptEncoder(nn.Module):
612
+ def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding):
613
+ super().__init__()
614
+ self.shared_embedding = shared_patch_embedding
615
+ self.mask_embed = SamMaskEmbedding(config)
616
+ self.no_mask_embed = nn.Embedding(1, config.hidden_size)
617
+
618
+ self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
619
+ self.input_image_size = config.image_size
620
+
621
+ self.point_embed = nn.ModuleList(
622
+ [nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)]
623
+ )
624
+ self.hidden_size = config.hidden_size
625
+ self.not_a_point_embed = nn.Embedding(1, config.hidden_size)
626
+
627
+ def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
628
+ """Embeds point prompts."""
629
+ points = points + 0.5 # Shift to center of pixel
630
+ if pad:
631
+ target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1])
632
+ target_labels_shape = (points.shape[0], points.shape[1], 1)
633
+ padding_point = torch.zeros(target_point_shape, device=points.device)
634
+ padding_label = -torch.ones(target_labels_shape, device=labels.device)
635
+ points = torch.cat([points, padding_point], dim=2)
636
+ labels = torch.cat([labels, padding_label], dim=2)
637
+ input_shape = (self.input_image_size, self.input_image_size)
638
+ point_embedding = self.shared_embedding(points, input_shape)
639
+
640
+ # torch.where and expanding the labels tensor is required by the ONNX export
641
+ point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding)
642
+
643
+ # This is required for the ONNX export. The dtype and device need to be explicitly
644
+ # specified as otherwise torch.onnx.export interprets them as double
645
+ point_embedding = torch.where(
646
+ labels[..., None] != -10,
647
+ point_embedding,
648
+ torch.tensor(0.0, dtype=point_embedding.dtype, device=point_embedding.device),
649
+ )
650
+
651
+ point_embedding = torch.where(
652
+ (labels == 0)[:, :, :, None],
653
+ point_embedding + self.point_embed[0].weight[None, None, :, :],
654
+ point_embedding,
655
+ )
656
+
657
+ point_embedding = torch.where(
658
+ (labels == 1)[:, :, :, None],
659
+ point_embedding + self.point_embed[1].weight[None, None, :, :],
660
+ point_embedding,
661
+ )
662
+
663
+ return point_embedding
664
+
665
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
666
+ """Embeds box prompts."""
667
+ boxes = boxes + 0.5 # Shift to center of pixel
668
+ batch_size, nb_boxes = boxes.shape[:2]
669
+ coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
670
+ input_shape = (self.input_image_size, self.input_image_size)
671
+ corner_embedding = self.shared_embedding(coords, input_shape)
672
+ corner_embedding[:, :, 0, :] += self.point_embed[2].weight
673
+ corner_embedding[:, :, 1, :] += self.point_embed[3].weight
674
+ return corner_embedding
675
+
676
+ def forward(
677
+ self,
678
+ input_points: Optional[Tuple[torch.Tensor, torch.Tensor]],
679
+ input_labels: Optional[torch.Tensor],
680
+ input_boxes: Optional[torch.Tensor],
681
+ input_masks: Optional[torch.Tensor],
682
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
683
+ """
684
+ Embeds different types of prompts, returning both sparse and dense embeddings.
685
+
686
+ Args:
687
+ points (`torch.Tensor`, *optional*):
688
+ point coordinates and labels to embed.
689
+ boxes (`torch.Tensor`, *optional*):
690
+ boxes to embed
691
+ masks (`torch.Tensor`, *optional*):
692
+ masks to embed
693
+ """
694
+ sparse_embeddings = None
695
+ batch_size = 1
696
+ target_device = self.shared_embedding.positional_embedding.device
697
+ if input_points is not None:
698
+ batch_size, point_batch_size = input_points.shape[:2]
699
+ if input_labels is None:
700
+ raise ValueError("If points are provided, labels must also be provided.")
701
+ point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
702
+ sparse_embeddings = point_embeddings
703
+ if input_boxes is not None:
704
+ batch_size = input_boxes.shape[0]
705
+ box_embeddings = self._embed_boxes(input_boxes)
706
+ if sparse_embeddings is None:
707
+ sparse_embeddings = box_embeddings
708
+ else:
709
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
710
+ if input_masks is not None:
711
+ dense_embeddings = self.mask_embed(input_masks)
712
+ else:
713
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
714
+ batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
715
+ )
716
+
717
+ if sparse_embeddings is None:
718
+ sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device)
719
+
720
+ return sparse_embeddings, dense_embeddings
721
+
722
+
723
+ class SamVisionAttention(nn.Module):
724
+ """Multi-head Attention block with relative position embeddings."""
725
+
726
+ def __init__(self, config, window_size):
727
+ super().__init__()
728
+ input_size = (
729
+ (config.image_size // config.patch_size, config.image_size // config.patch_size)
730
+ if window_size == 0
731
+ else (window_size, window_size)
732
+ )
733
+
734
+ self.num_attention_heads = config.num_attention_heads
735
+ head_dim = config.hidden_size // config.num_attention_heads
736
+ self.scale = head_dim**-0.5
737
+ self.dropout = config.attention_dropout
738
+
739
+ self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias)
740
+ self.proj = nn.Linear(config.hidden_size, config.hidden_size)
741
+
742
+ self.use_rel_pos = config.use_rel_pos
743
+ if self.use_rel_pos:
744
+ if input_size is None:
745
+ raise ValueError("Input size must be provided if using relative positional encoding.")
746
+
747
+ # initialize relative positional embeddings
748
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
749
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
750
+
751
+ def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
752
+ """
753
+ Get relative positional embeddings according to the relative positions of
754
+ query and key sizes.
755
+
756
+ Args:
757
+ q_size (int):
758
+ size of the query.
759
+ k_size (int):
760
+ size of key k.
761
+ rel_pos (`torch.Tensor`):
762
+ relative position embeddings (L, channel).
763
+
764
+ Returns:
765
+ Extracted positional embeddings according to relative positions.
766
+ """
767
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
768
+ # Interpolate rel pos.
769
+ rel_pos_resized = F.interpolate(
770
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
771
+ size=max_rel_dist,
772
+ mode="linear",
773
+ )
774
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
775
+
776
+ # Scale the coords with short length if shapes for q and k are different.
777
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
778
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
779
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
780
+
781
+ return rel_pos_resized[relative_coords.long()]
782
+
783
+ def add_decomposed_rel_pos(
784
+ self,
785
+ attn: torch.Tensor,
786
+ query: torch.Tensor,
787
+ rel_pos_h: torch.Tensor,
788
+ rel_pos_w: torch.Tensor,
789
+ q_size: Tuple[int, int],
790
+ k_size: Tuple[int, int],
791
+ ) -> torch.Tensor:
792
+ """
793
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
794
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
795
+
796
+ Args:
797
+ attn (`torch.Tensor`):
798
+ attention map.
799
+ query (`torch.Tensor`):
800
+ query q in the attention layer with shape (batch_size, query_height * query_width, channel).
801
+ rel_pos_h (`torch.Tensor`):
802
+ relative position embeddings (Lh, channel) for height axis.
803
+ rel_pos_w (`torch.Tensor`):
804
+ relative position embeddings (Lw, channel) for width axis.
805
+ q_size (tuple):
806
+ spatial sequence size of query q with (query_height, query_width).
807
+ k_size (tuple):
808
+ spatial sequence size of key k with (key_height, key_width).
809
+
810
+ Returns:
811
+ attn (`torch.Tensor`):
812
+ attention map with added relative positional embeddings.
813
+ """
814
+ query_height, query_width = q_size
815
+ key_height, key_width = k_size
816
+ relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
817
+ relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)
818
+
819
+ batch_size, _, dim = query.shape
820
+ reshaped_query = query.reshape(batch_size, query_height, query_width, dim)
821
+ rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height)
822
+ rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width)
823
+ attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width)
824
+ attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
825
+ attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width)
826
+ return attn
827
+
828
+ def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
829
+ batch_size, height, width, _ = hidden_states.shape
830
+ # qkv with shape (3, batch_size, nHead, height * width, channel)
831
+ qkv = (
832
+ self.qkv(hidden_states)
833
+ .reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
834
+ .permute(2, 0, 3, 1, 4)
835
+ )
836
+ # q, k, v with shape (batch_size * nHead, height * width, channel)
837
+ query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
838
+
839
+ attn_weights = (query * self.scale) @ key.transpose(-2, -1)
840
+
841
+ if self.use_rel_pos:
842
+ attn_weights = self.add_decomposed_rel_pos(
843
+ attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
844
+ )
845
+
846
+ attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype)
847
+
848
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
849
+
850
+ attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1)
851
+ attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
852
+
853
+ attn_output = self.proj(attn_output)
854
+
855
+ if output_attentions:
856
+ outputs = (attn_output, attn_weights)
857
+ else:
858
+ outputs = (attn_output, None)
859
+
860
+ return outputs
861
+
862
+
863
+ class SamVisionLayer(nn.Module):
864
+ def __init__(self, config, window_size):
865
+ super().__init__()
866
+ self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
867
+ self.attn = SamVisionAttention(config, window_size)
868
+ self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
869
+ self.mlp = SamMLPBlock(config)
870
+ self.window_size = window_size
871
+
872
+ def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
873
+ """
874
+ Partition the input into non-overlapping windows, padding if needed.
876
+ Args:
877
+ hidden_states (`torch.Tensor`): input tokens with shape [batch_size, height, width, channel].
878
+ window_size (`int`): window size.
878
+
879
+ Returns:
880
+ windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
881
+ (pad_height, pad_width): padded height and width before partition
882
+ """
883
+ batch_size, height, width, channel = hidden_states.shape
884
+
885
+ pad_h = (window_size - height % window_size) % window_size
886
+ pad_w = (window_size - width % window_size) % window_size
887
+ hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
888
+ pad_height, pad_width = height + pad_h, width + pad_w
889
+
890
+ hidden_states = hidden_states.reshape(
891
+ batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel
892
+ )
893
+ windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
894
+ return windows, (pad_height, pad_width)
895
+
896
+ def window_unpartition(
897
+ self, windows: torch.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]
898
+ ) -> torch.Tensor:
899
+ """
900
+ Merge windows back into the original sequence and remove any padding.
901
+ Args:
902
+ windows (`torch.Tensor`):
903
+ input tokens with shape [batch_size * num_windows, window_size, window_size, channel].
904
+ window_size (int):
905
+ window size.
906
+ padding_shape (Tuple):
907
+ padded height and width (pad_height, pad_width).
908
+ original_shape (Tuple): original height and width (height, width) before padding.
909
+
910
+ Returns:
911
+ hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
912
+ """
913
+ pad_height, pad_width = padding_shape
914
+ height, width = original_shape
915
+ batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
916
+ hidden_states = windows.reshape(
917
+ batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1
918
+ )
919
+ hidden_states = (
920
+ hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
921
+ )
922
+
923
+ hidden_states = hidden_states[:, :height, :width, :].contiguous()
924
+ return hidden_states
925
+
926
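The two helpers above pad the feature map to a multiple of the window size, split it into windows, and later undo both steps. A minimal standalone sketch of that round trip (assuming only `torch`; all sizes below are made up for illustration):

```python
import torch
import torch.nn.functional as F

batch_size, height, width, channel, window_size = 1, 10, 13, 4, 7
hidden_states = torch.arange(batch_size * height * width * channel, dtype=torch.float32)
hidden_states = hidden_states.reshape(batch_size, height, width, channel)

# Pad the bottom/right edges so both spatial dims become multiples of window_size
pad_h = (window_size - height % window_size) % window_size  # 4
pad_w = (window_size - width % window_size) % window_size   # 1
padded = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
pad_height, pad_width = height + pad_h, width + pad_w

# Partition: (1, 14, 14, 4) -> (4 windows, 7, 7, 4)
windows = (
    padded.reshape(batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel)
    .permute(0, 1, 3, 2, 4, 5)
    .reshape(-1, window_size, window_size, channel)
)
print(windows.shape)  # torch.Size([4, 7, 7, 4])

# Unpartition: reshape back, undo the permute, then crop away the padding
restored = (
    windows.reshape(batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1)
    .permute(0, 1, 3, 2, 4, 5)
    .reshape(batch_size, pad_height, pad_width, -1)[:, :height, :width, :]
)
print(torch.equal(restored, hidden_states))  # True
```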
+ def forward(
927
+ self,
928
+ hidden_states: torch.Tensor,
929
+ output_attentions: Optional[bool] = False,
930
+ ) -> Tuple[torch.FloatTensor]:
931
+ residual = hidden_states
932
+
933
+ hidden_states = self.layer_norm1(hidden_states)
934
+ # Window partition
935
+ if self.window_size > 0:
936
+ height, width = hidden_states.shape[1], hidden_states.shape[2]
937
+ hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)
938
+
939
+ hidden_states, attn_weights = self.attn(
940
+ hidden_states=hidden_states,
941
+ output_attentions=output_attentions,
942
+ )
943
+ # Reverse window partition
944
+ if self.window_size > 0:
945
+ hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))
946
+
947
+ hidden_states = residual + hidden_states
948
+ layernorm_output = self.layer_norm2(hidden_states)
949
+ hidden_states = hidden_states + self.mlp(layernorm_output)
950
+
951
+ outputs = (hidden_states,)
952
+ if output_attentions:
953
+ outputs += (attn_weights,)
954
+
955
+ return outputs
956
+
957
+
958
+ class SamVisionNeck(nn.Module):
959
+ def __init__(self, config: SamVisionConfig):
960
+ super().__init__()
961
+ self.config = config
962
+
963
+ self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False)
964
+ self.layer_norm1 = SamLayerNorm(config.output_channels, data_format="channels_first")
965
+ self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False)
966
+ self.layer_norm2 = SamLayerNorm(config.output_channels, data_format="channels_first")
967
+
968
+ def forward(self, hidden_states):
969
+ hidden_states = hidden_states.permute(0, 3, 1, 2)
970
+ hidden_states = self.conv1(hidden_states)
971
+ hidden_states = self.layer_norm1(hidden_states)
972
+
973
+ hidden_states = self.conv2(hidden_states)
974
+ hidden_states = self.layer_norm2(hidden_states)
975
+ return hidden_states
976
+
977
+
978
+ class SamVisionEncoder(nn.Module):
979
+ def __init__(self, config: SamVisionConfig):
980
+ super().__init__()
981
+ self.config = config
982
+ self.image_size = config.image_size
983
+
984
+ self.patch_embed = SamPatchEmbeddings(config)
985
+
986
+ self.pos_embed = None
987
+ if config.use_abs_pos:
988
+ # Initialize absolute positional embedding with pretrain image size.
989
+ self.pos_embed = nn.Parameter(
990
+ torch.zeros(
991
+ 1,
992
+ config.image_size // config.patch_size,
993
+ config.image_size // config.patch_size,
994
+ config.hidden_size,
995
+ )
996
+ )
997
+
998
+ self.layers = nn.ModuleList()
999
+ for i in range(config.num_hidden_layers):
1000
+ layer = SamVisionLayer(
1001
+ config,
1002
+ window_size=config.window_size if i not in config.global_attn_indexes else 0,
1003
+ )
1004
+ self.layers.append(layer)
1005
+
1006
+ self.neck = SamVisionNeck(config)
1007
+
1008
+ self.gradient_checkpointing = False
1009
+
1010
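A side note on the layer construction in `__init__` above: every index listed in `config.global_attn_indexes` gets `window_size=0`, i.e. full global attention, while all other layers attend within local windows. A small sketch of that split (the default `SamVisionConfig` values are used purely for illustration):

```python
from transformers import SamVisionConfig

config = SamVisionConfig()  # default vision tower settings, illustrative only
for i in range(config.num_hidden_layers):
    window_size = config.window_size if i not in config.global_attn_indexes else 0
    kind = "global attention" if window_size == 0 else f"windowed attention (window_size={window_size})"
    print(f"layer {i:2d}: {kind}")
```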
+ def get_input_embeddings(self):
1011
+ return self.patch_embed
1012
+
1013
+ def forward(
1014
+ self,
1015
+ pixel_values: Optional[torch.FloatTensor] = None,
1016
+ output_attentions: Optional[bool] = None,
1017
+ output_hidden_states: Optional[bool] = None,
1018
+ return_dict: Optional[bool] = None,
1019
+ ) -> Union[Tuple, SamVisionEncoderOutput]:
1020
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1021
+ output_hidden_states = (
1022
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1023
+ )
1024
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1025
+
1026
+ if pixel_values is None:
1027
+ raise ValueError("You have to specify pixel_values")
1028
+
1029
+ hidden_states = self.patch_embed(pixel_values)
1030
+ if self.pos_embed is not None:
1031
+ hidden_states = hidden_states + self.pos_embed
1032
+
1033
+ all_hidden_states = () if output_hidden_states else None
1034
+ all_self_attentions = () if output_attentions else None
1035
+
1036
+ for i, layer_module in enumerate(self.layers):
1037
+ if output_hidden_states:
1038
+ all_hidden_states = all_hidden_states + (hidden_states,)
1039
+
1040
+ if self.gradient_checkpointing and self.training:
1041
+ layer_outputs = self._gradient_checkpointing_func(
1042
+ layer_module.__call__,
1043
+ hidden_states,
1044
+ )
1045
+ else:
1046
+ layer_outputs = layer_module(hidden_states, output_attentions=output_attentions)
1047
+
1048
+ hidden_states = layer_outputs[0]
1049
+
1050
+ if output_attentions:
1051
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1052
+
1053
+ if output_hidden_states:
1054
+ all_hidden_states = all_hidden_states + (hidden_states,)
1055
+
1056
+ hidden_states = self.neck(hidden_states)
1057
+
1058
+ if not return_dict:
1059
+ outputs = (hidden_states,)
1060
+ if output_hidden_states:
1061
+ outputs = outputs + (all_hidden_states,)
1062
+ if output_attentions:
1063
+ outputs = outputs + (all_self_attentions,)
1064
+ return outputs
1065
+
1066
+ return SamVisionEncoderOutput(
1067
+ last_hidden_state=hidden_states,
1068
+ hidden_states=all_hidden_states,
1069
+ attentions=all_self_attentions,
1070
+ )
1071
+
1072
+
1073
+ class SamPreTrainedModel(PreTrainedModel):
1074
+ config_class = SamConfig
1075
+ base_model_prefix = "sam"
1076
+ main_input_name = "pixel_values"
1077
+
1078
+ def _init_weights(self, module):
1079
+ std = self.config.initializer_range
1080
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
1081
+ module.weight.data.normal_(mean=0.0, std=std)
1082
+ if module.bias is not None:
1083
+ module.bias.data.zero_()
1084
+ elif isinstance(module, nn.Embedding):
1085
+ module.weight.data.normal_(mean=0.0, std=std)
1086
+ if module.padding_idx is not None:
1087
+ module.weight.data[module.padding_idx].zero_()
1088
+
1089
+
1090
+ SAM_START_DOCSTRING = r"""
1091
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1092
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
1093
+ etc.)
1094
+
1095
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1096
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1097
+ and behavior.
1098
+
1099
+ Parameters:
1100
+ config ([`SamConfig`]): Model configuration class with all the parameters of the model.
1101
+ Initializing with a config file does not load the weights associated with the model, only the
1102
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1103
+ """
1104
+
1105
+
1106
+ SAM_INPUTS_DOCSTRING = r"""
1107
+ Args:
1108
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1109
+ Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for
1110
+ details.
1111
+ input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
1112
+ Input 2D spatial points; these are used by the prompt encoder to encode the prompt. Generally yields much
1113
+ better results. The points can be obtained by passing a list of list of list to the processor that will
1114
+ create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
1115
+ second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
1116
+ per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
1117
+ multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
1118
+ coordinates of the point. If a different number of points is passed either for each image, or for each
1119
+ mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
1120
+ computation of the embedding will be skipped for these points using the labels.
1121
+ input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
1122
+ Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
1123
+ official implementation, there are 3 types of labels
1124
+
1125
+ - `1`: the point is a point that contains the object of interest
1126
+ - `0`: the point is a point that does not contain the object of interest
1127
+ - `-1`: the point corresponds to the background
1128
+
1129
+ We added the label:
1130
+
1131
+ - `-10`: the point is a padding point, thus should be ignored by the prompt encoder
1132
+
1133
+ The padding labels should be automatically done by the processor.
1134
+ input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
1135
+ Input boxes for the points; these are used by the prompt encoder to encode the prompt. Generally yields
1136
+ much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
1137
+ that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
1138
+ size, the number of boxes per image and the coordinates of the top left and bottom right points of the box.
1139
+ In the order (`x1`, `y1`, `x2`, `y2`):
1140
+
1141
+ - `x1`: the x coordinate of the top left point of the input box
1142
+ - `y1`: the y coordinate of the top left point of the input box
1143
+ - `x2`: the x coordinate of the bottom right point of the input box
1144
+ - `y2`: the y coordinate of the bottom right point of the input box
1145
+
1146
+ input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
1147
+ SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
1148
+ generate a corresponding embedding that will later be fed to the mask decoder. These masks need to be
1149
+ manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
1150
+
1151
+ image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
1152
+ Image embeddings, these are used by the mask decoder to generate masks and IoU scores. For more memory
1153
+ efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
1154
+ method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
1155
+ multimask_output (`bool`, *optional*):
1156
+ In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
1157
+ bounding box if relevant). However, it is possible to just output a single mask that corresponds to the
1158
+ "best" mask, by specifying `multimask_output=False`.
1159
+ attention_similarity (`torch.FloatTensor`, *optional*):
1160
+ Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
1161
+ model is used for personalization as introduced in [PerSAM](https://arxiv.org/abs/2305.03048).
1162
+ target_embedding (`torch.FloatTensor`, *optional*):
1163
+ Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
1164
+ the model is used for personalization as introduced in [PerSAM](https://arxiv.org/abs/2305.03048).
1165
+ output_attentions (`bool`, *optional*):
1166
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1167
+ tensors for more detail.
1168
+ output_hidden_states (`bool`, *optional*):
1169
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1170
+ more detail.
1171
+ return_dict (`bool`, *optional*):
1172
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1173
+ """
1174
+
1175
+
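To make the nested-list conventions above concrete, here is a short sketch of how points and boxes are typically passed through `SamProcessor` (the checkpoint name and the blank test image are illustrative only; only the output shapes matter here):

```python
from PIL import Image
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.new("RGB", (640, 480))  # dummy image, just to exercise the processor

input_points = [[[320, 240]]]           # 1 image -> 1 prompt -> 1 (x, y) point
input_boxes = [[[100, 100, 500, 400]]]  # 1 image -> 1 (x1, y1, x2, y2) box

inputs = processor(
    images=image, input_points=input_points, input_boxes=input_boxes, return_tensors="pt"
)
print(inputs["input_points"].shape)  # torch.Size([1, 1, 1, 2])
print(inputs["input_boxes"].shape)   # torch.Size([1, 1, 4])
```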
1176
+ @add_start_docstrings(
1177
+ "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ",
1178
+ " optional 2D location and bounding boxes.",
1179
+ SAM_START_DOCSTRING,
1180
+ )
1181
+ class SamModel(SamPreTrainedModel):
1182
+ _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"]
1183
+
1184
+ def __init__(self, config):
1185
+ super().__init__(config)
1186
+ self.shared_image_embedding = SamPositionalEmbedding(config.vision_config)
1187
+
1188
+ self.vision_encoder = SamVisionEncoder(config.vision_config)
1189
+ self.prompt_encoder = SamPromptEncoder(config.prompt_encoder_config, self.shared_image_embedding)
1190
+ self.mask_decoder = SamMaskDecoder(config.mask_decoder_config)
1191
+
1192
+ self.post_init()
1193
+
1194
+ def get_input_embeddings(self):
1195
+ return self.vision_encoder.get_input_embeddings()
1196
+
1197
+ def get_image_wide_positional_embeddings(self):
1198
+ size = self.config.prompt_encoder_config.image_embedding_size
1199
+ target_device = self.shared_image_embedding.positional_embedding.device
1200
+ target_dtype = self.shared_image_embedding.positional_embedding.dtype
1201
+ grid = torch.ones((size, size), device=target_device, dtype=target_dtype)
1202
+ y_embed = grid.cumsum(dim=0) - 0.5
1203
+ x_embed = grid.cumsum(dim=1) - 0.5
1204
+ y_embed = y_embed / size
1205
+ x_embed = x_embed / size
1206
+
1207
+ positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1))
1208
+ return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width
1209
+
1210
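The `cumsum(...) - 0.5` trick above produces pixel-centre coordinates normalized to (0, 1) before they go through the shared positional embedding. A standalone numeric sketch (the grid size is chosen only for illustration):

```python
import torch

size = 4  # stands in for config.prompt_encoder_config.image_embedding_size
grid = torch.ones((size, size))
y_embed = (grid.cumsum(dim=0) - 0.5) / size  # row centres: 0.125, 0.375, 0.625, 0.875
x_embed = (grid.cumsum(dim=1) - 0.5) / size  # column centres: same values along the width
coords = torch.stack([x_embed, y_embed], dim=-1)  # (size, size, 2), every entry in (0, 1)
print(coords[0, 0])    # tensor([0.1250, 0.1250]) -> centre of the top-left cell
print(coords[-1, -1])  # tensor([0.8750, 0.8750]) -> centre of the bottom-right cell
```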
+ @torch.no_grad()
1211
+ def get_image_embeddings(
1212
+ self,
1213
+ pixel_values,
1214
+ output_attentions: Optional[bool] = None,
1215
+ output_hidden_states: Optional[bool] = None,
1216
+ return_dict: Optional[bool] = None,
1217
+ ):
1218
+ r"""
1219
+ Returns the image embeddings by passing the pixel values through the vision encoder.
1220
+
1221
+ Args:
1222
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1223
+ Input pixel values
1224
+ output_attentions (`bool`, *optional*):
1225
+ Whether or not to return the attentions tensors of all attention layers.
1226
+ output_hidden_states (`bool`, *optional*):
1227
+ Whether or not to return the hidden states of all layers.
1228
+ return_dict (`bool`, *optional*):
1229
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1230
+
1231
+ """
1232
+ vision_output = self.vision_encoder(
1233
+ pixel_values,
1234
+ output_attentions=output_attentions,
1235
+ output_hidden_states=output_hidden_states,
1236
+ return_dict=return_dict,
1237
+ )
1238
+ image_embeddings = vision_output[0]
1239
+ return image_embeddings
1240
+
1241
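This method exists so that the heavy vision encoder only has to run once per image; the cached embeddings can then be reused for many prompts. A sketch of that two-step workflow (checkpoint name and dummy image are illustrative only):

```python
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.new("RGB", (640, 480))

inputs = processor(images=image, input_points=[[[320, 240]]], return_tensors="pt")
with torch.no_grad():
    # Run the vision encoder once and cache the result
    image_embeddings = model.get_image_embeddings(inputs["pixel_values"])
    # Reuse the cached embeddings instead of passing pixel_values again
    outputs = model(
        input_points=inputs["input_points"],
        image_embeddings=image_embeddings,
        multimask_output=True,
    )
print(outputs.pred_masks.shape)  # e.g. torch.Size([1, 1, 3, 256, 256])
```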
+ @torch.no_grad()
1242
+ def get_prompt_embeddings(
1243
+ self,
1244
+ input_points: Optional[torch.FloatTensor] = None,
1245
+ input_labels: Optional[torch.LongTensor] = None,
1246
+ input_boxes: Optional[torch.FloatTensor] = None,
1247
+ input_masks: Optional[torch.LongTensor] = None,
1248
+ ):
1249
+ r"""
1250
+ Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
1251
+
1252
+ Args:
1253
+ input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
1254
+ Optional input points for the prompt encoder. The padding of the point is automatically done by the
1255
+ processor. `point_batch_size` refers to the number of masks that we want the model to predict per
1256
+ point. The model will output `point_batch_size` times 3 masks in total.
1257
+ input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
1258
+ Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
1259
+ processor, or can be fed by the user.
1260
+ input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
1261
+ Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
1262
+ processor. Users can also pass the input boxes manually.
1263
+ input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
1264
+ Optional input masks for the prompt encoder.
1265
+ """
1266
+ prompt_output = self.prompt_encoder(
1267
+ input_points=input_points,
1268
+ input_labels=input_labels,
1269
+ input_boxes=input_boxes,
1270
+ input_masks=input_masks,
1271
+ )
1272
+ return prompt_output
1273
+
1274
+ @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING)
1275
+ def forward(
1276
+ self,
1277
+ pixel_values: Optional[torch.FloatTensor] = None,
1278
+ input_points: Optional[torch.FloatTensor] = None,
1279
+ input_labels: Optional[torch.LongTensor] = None,
1280
+ input_boxes: Optional[torch.FloatTensor] = None,
1281
+ input_masks: Optional[torch.LongTensor] = None,
1282
+ image_embeddings: Optional[torch.FloatTensor] = None,
1283
+ multimask_output: bool = True,
1284
+ attention_similarity: Optional[torch.FloatTensor] = None,
1285
+ target_embedding: Optional[torch.FloatTensor] = None,
1286
+ output_attentions: Optional[bool] = None,
1287
+ output_hidden_states: Optional[bool] = None,
1288
+ return_dict: Optional[bool] = None,
1289
+ **kwargs,
1290
+ ) -> List[Dict[str, torch.Tensor]]:
1291
+ r"""
1292
+ Example:
1293
+
1294
+ ```python
1295
+ >>> from PIL import Image
1296
+ >>> import requests
1297
+ >>> from transformers import AutoModel, AutoProcessor
1298
+
1299
+ >>> model = AutoModel.from_pretrained("facebook/sam-vit-base")
1300
+ >>> processor = AutoProcessor.from_pretrained("facebook/sam-vit-base")
1301
+
1302
+ >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
1303
+ >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
1304
+ >>> input_points = [[[400, 650]]] # 2D location of a window on the car
1305
+ >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
1306
+
1307
+ >>> # Get segmentation mask
1308
+ >>> outputs = model(**inputs)
1309
+
1310
+ >>> # Postprocess masks
1311
+ >>> masks = processor.post_process_masks(
1312
+ ... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
1313
+ ... )
1314
+ ```
1315
+ """
1316
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1317
+ output_hidden_states = (
1318
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1319
+ )
1320
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1321
+
1322
+ if pixel_values is None and image_embeddings is None:
1323
+ raise ValueError("Either pixel_values or image_embeddings must be provided.")
1324
+
1325
+ if pixel_values is not None and image_embeddings is not None:
1326
+ raise ValueError("Only one of pixel_values and image_embeddings can be provided.")
1327
+
1328
+ if input_points is not None and len(input_points.shape) != 4:
1329
+ raise ValueError(
1330
+ "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.",
1331
+ " got {}.".format(input_points.shape),
1332
+ )
1333
+ if input_boxes is not None and len(input_boxes.shape) != 3:
1334
+ raise ValueError(
1335
+ "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.",
1336
+ " got {}.".format(input_boxes.shape),
1337
+ )
1338
+ if input_points is not None and input_boxes is not None:
1339
+ point_batch_size = input_points.shape[1]
1340
+ box_batch_size = input_boxes.shape[1]
1341
+ if point_batch_size != box_batch_size:
1342
+ raise ValueError(
1343
+ "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
1344
+ point_batch_size, box_batch_size
1345
+ )
1346
+ )
1347
+
1348
+ image_positional_embeddings = self.get_image_wide_positional_embeddings()
1349
+ # repeat with batch size
1350
+ batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]
1351
+ image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
1352
+
1353
+ vision_attentions = None
1354
+ vision_hidden_states = None
1355
+
1356
+ if pixel_values is not None:
1357
+ vision_outputs = self.vision_encoder(
1358
+ pixel_values,
1359
+ output_attentions=output_attentions,
1360
+ output_hidden_states=output_hidden_states,
1361
+ return_dict=return_dict,
1362
+ )
1363
+ image_embeddings = vision_outputs[0]
1364
+
1365
+ if output_hidden_states:
1366
+ vision_hidden_states = vision_outputs[1]
1367
+ if output_attentions:
1368
+ vision_attentions = vision_outputs[-1]
1369
+
1370
+ if input_points is not None and input_labels is None:
1371
+ input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
1372
+
1373
+ if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:
1374
+ raise ValueError(
1375
+ "The batch size of the image embeddings and the input points must be the same. ",
1376
+ "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]),
1377
+ " if you want to pass multiple points for the same image, make sure that you passed ",
1378
+ " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ",
1379
+ " input_labels of shape (batch_size, point_batch_size, num_points_per_image)",
1380
+ )
1381
+
1382
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
1383
+ input_points=input_points,
1384
+ input_labels=input_labels,
1385
+ input_boxes=input_boxes,
1386
+ input_masks=input_masks,
1387
+ )
1388
+
1389
+ low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder(
1390
+ image_embeddings=image_embeddings,
1391
+ image_positional_embeddings=image_positional_embeddings,
1392
+ sparse_prompt_embeddings=sparse_embeddings,
1393
+ dense_prompt_embeddings=dense_embeddings,
1394
+ multimask_output=multimask_output,
1395
+ attention_similarity=attention_similarity,
1396
+ target_embedding=target_embedding,
1397
+ output_attentions=output_attentions,
1398
+ )
1399
+
1400
+ if not return_dict:
1401
+ output = (iou_predictions, low_res_masks)
1402
+ if output_hidden_states:
1403
+ output = output + (vision_hidden_states,)
1404
+
1405
+ if output_attentions:
1406
+ output = output + (vision_attentions, mask_decoder_attentions)
1407
+ return output
1408
+
1409
+ return SamImageSegmentationOutput(
1410
+ iou_scores=iou_predictions,
1411
+ pred_masks=low_res_masks,
1412
+ vision_hidden_states=vision_hidden_states,
1413
+ vision_attentions=vision_attentions,
1414
+ mask_decoder_attentions=mask_decoder_attentions,
1415
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py ADDED
@@ -0,0 +1,1656 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TensorFlow SAM model. This file was mostly generated by auto-translation from the PyTorch original. In the event of a
17
+ discrepancy, the original file should be regarded as the 'reference' version.
18
+ """
19
+
20
+
21
+ from __future__ import annotations
22
+
23
+ import collections
24
+ from dataclasses import dataclass
25
+ from typing import Optional, Tuple, Union
26
+
27
+ import numpy as np
28
+ import tensorflow as tf
29
+
30
+ from ...activations_tf import ACT2FN
31
+ from ...modeling_tf_outputs import TFBaseModelOutput
32
+ from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs
33
+ from ...tf_utils import flatten, functional_layernorm
34
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
35
+ from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "SamConfig"
41
+ _CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge"
42
+
43
+
44
+ from ..deprecated._archive_maps import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
45
+
46
+
47
+ @dataclass
48
+ class TFSamVisionEncoderOutput(ModelOutput):
49
+ """
50
+ Base class for the SAM vision model's outputs that also contains image embeddings obtained by applying the projection
51
+ layer to the pooler_output.
52
+
53
+ Args:
54
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
55
+ The image embeddings obtained by applying the projection layer to the pooler_output.
56
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
57
+ Sequence of hidden-states at the output of the last layer of the model.
58
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
59
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
60
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
61
+
62
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
63
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
64
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
65
+ sequence_length)`.
66
+
67
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
68
+ heads.
69
+ """
70
+
71
+ image_embeds: tf.Tensor | None = None
72
+ last_hidden_state: tf.Tensor = None
73
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
74
+ attentions: Tuple[tf.Tensor, ...] | None = None
75
+
76
+
77
+ @dataclass
78
+ class TFSamImageSegmentationOutput(ModelOutput):
79
+ """
80
+ Base class for the Segment Anything model's output.
81
+
82
+ Args:
83
+ iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`):
84
+ The iou scores of the predicted masks.
85
+ pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`):
86
+ The predicted low resolutions masks. Needs to be post-processed by the processor
87
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
88
+ Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
89
+ the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
90
+
91
+ Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
92
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
93
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
94
+ sequence_length)`.
95
+
96
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
97
+ heads.
98
+ mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
99
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
100
+ sequence_length)`.
101
+
102
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
103
+ heads.
104
+ """
105
+
106
+ iou_scores: tf.Tensor = None
107
+ pred_masks: tf.Tensor = None
108
+ vision_hidden_states: Tuple[tf.Tensor, ...] | None = None
109
+ vision_attentions: Tuple[tf.Tensor, ...] | None = None
110
+ mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None
111
+
112
+
113
+ class TFSamPatchEmbeddings(keras.layers.Layer):
114
+ """
115
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
116
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
117
+ Transformer.
118
+ """
119
+
120
+ def __init__(self, config, **kwargs):
121
+ super().__init__(**kwargs)
122
+ image_size, patch_size = config.image_size, config.patch_size
123
+ num_channels, hidden_size = config.num_channels, config.hidden_size
124
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
125
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
126
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
127
+ self.image_size = image_size
128
+ self.patch_size = patch_size
129
+ self.num_channels = num_channels
130
+ self.num_patches = num_patches
131
+
132
+ self.projection = keras.layers.Conv2D(
133
+ hidden_size, kernel_size=patch_size, strides=patch_size, name="projection"
134
+ )
135
+
136
+ def call(self, pixel_values):
137
+ batch_size, num_channels, height, width = shape_list(pixel_values)
138
+ if num_channels != self.num_channels:
139
+ raise ValueError(
140
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
141
+ )
142
+ if height != self.image_size[0] or width != self.image_size[1]:
143
+ raise ValueError(
144
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
145
+ )
146
+ embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1]))
147
+ return embeddings
148
+
149
+ def build(self, input_shape=None):
150
+ if self.built:
151
+ return
152
+ self.built = True
153
+ if getattr(self, "projection", None) is not None:
154
+ with tf.name_scope(self.projection.name):
155
+ self.projection.build([None, None, None, self.num_channels])
156
+
157
+
158
+ class TFSamMLPBlock(keras.layers.Layer):
159
+ def __init__(self, config, **kwargs):
160
+ super().__init__(**kwargs)
161
+ self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1")
162
+ self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2")
163
+ self.act = ACT2FN[config.hidden_act]
164
+ self.config = config
165
+
166
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
167
+ hidden_states = self.lin1(hidden_states)
168
+ hidden_states = self.act(hidden_states)
169
+ hidden_states = self.lin2(hidden_states)
170
+ return hidden_states
171
+
172
+ def build(self, input_shape=None):
173
+ if self.built:
174
+ return
175
+ self.built = True
176
+ if getattr(self, "lin1", None) is not None:
177
+ with tf.name_scope(self.lin1.name):
178
+ self.lin1.build([None, None, self.config.hidden_size])
179
+ if getattr(self, "lin2", None) is not None:
180
+ with tf.name_scope(self.lin2.name):
181
+ self.lin2.build([None, None, self.config.mlp_dim])
182
+
183
+
184
+ class TFSamLayerNorm(keras.layers.Layer):
185
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
186
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
187
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
188
+ """
189
+
190
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs):
191
+ super().__init__(**kwargs)
192
+ self.eps = eps
193
+ self.data_format = data_format
194
+ self.normalized_shape = normalized_shape
195
+ if self.data_format not in ["channels_last", "channels_first"]:
196
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
197
+
198
+ def build(self, input_shape):
199
+ self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight")
200
+ self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias")
201
+ super().build(input_shape)
202
+
203
+ def call(self, x: tf.Tensor) -> tf.Tensor:
204
+ if self.data_format == "channels_last":
205
+ x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=-1)
206
+ elif self.data_format == "channels_first":
207
+ x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=1)
208
+ return x
209
+
210
+
211
+ class TFSamAttention(keras.layers.Layer):
212
+ """
213
+ SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
214
+ values.
215
+ """
216
+
217
+ def __init__(self, config, downsample_rate=None, **kwargs):
218
+ super().__init__(**kwargs)
219
+ self.hidden_size = config.hidden_size
220
+
221
+ downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
222
+
223
+ self.internal_dim = config.hidden_size // downsample_rate
224
+ self.num_attention_heads = config.num_attention_heads
225
+ if self.internal_dim % config.num_attention_heads != 0:
226
+ raise ValueError("num_attention_heads must divide hidden_size.")
227
+
228
+ self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj")
229
+ self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj")
230
+ self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj")
231
+ self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj")
232
+
233
+ def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor:
234
+ batch, point_batch_size, n_tokens, channel = shape_list(hidden_states)
235
+ c_per_head = channel // num_attention_heads
236
+ hidden_states = tf.reshape(
237
+ hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
238
+ )
239
+ return tf.transpose(hidden_states, perm=[0, 2, 1, 3])
240
+
241
+ def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor:
242
+ batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states)
243
+ hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3])
244
+ return tf.reshape(
245
+ hidden_states,
246
+ (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head),
247
+ )
248
+
249
+ def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor:
250
+ # Input projections
251
+ query = self.q_proj(query)
252
+ key = self.k_proj(key)
253
+ value = self.v_proj(value)
254
+
255
+ point_batch_size = shape_list(query)[1]
256
+ # Separate into heads
257
+ query = self._separate_heads(query, self.num_attention_heads)
258
+ key = self._separate_heads(key, self.num_attention_heads)
259
+ value = self._separate_heads(value, self.num_attention_heads)
260
+
261
+ # SamAttention
262
+ _, _, _, c_per_head = shape_list(query)
263
+ attn = tf.matmul(
264
+ query, tf.transpose(key, perm=[0, 1, 3, 2])
265
+ ) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens
266
+ attn = attn / tf.math.sqrt(float(c_per_head))
267
+ attn = tf.nn.softmax(attn, axis=-1)
268
+
269
+ # Get output
270
+ out = tf.matmul(attn, value)
271
+ out = self._recombine_heads(out, point_batch_size)
272
+ out = self.out_proj(out)
273
+
274
+ return out
275
+
276
+ def build(self, input_shape=None):
277
+ if self.built:
278
+ return
279
+ self.built = True
280
+ if getattr(self, "q_proj", None) is not None:
281
+ with tf.name_scope(self.q_proj.name):
282
+ self.q_proj.build([None, None, self.hidden_size])
283
+ if getattr(self, "k_proj", None) is not None:
284
+ with tf.name_scope(self.k_proj.name):
285
+ self.k_proj.build([None, None, self.hidden_size])
286
+ if getattr(self, "v_proj", None) is not None:
287
+ with tf.name_scope(self.v_proj.name):
288
+ self.v_proj.build([None, None, self.hidden_size])
289
+ if getattr(self, "out_proj", None) is not None:
290
+ with tf.name_scope(self.out_proj.name):
291
+ self.out_proj.build([None, None, self.internal_dim])
292
+
293
+
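`_separate_heads` and `_recombine_heads` above only shuffle axes; nothing is learned in them. A standalone shape-only sketch of the round trip (arbitrary small sizes, TensorFlow only):

```python
import tensorflow as tf

batch, point_batch, tokens, channels, heads = 2, 3, 5, 8, 4
x = tf.random.normal((batch, point_batch, tokens, channels))

# Separate: fold point_batch into the batch axis, then split channels across heads
separated = tf.transpose(
    tf.reshape(x, (batch * point_batch, tokens, heads, channels // heads)), perm=[0, 2, 1, 3]
)
print(separated.shape)  # (6, 4, 5, 2)

# Recombine: undo the transpose and merge the heads back into the channel axis
recombined = tf.reshape(
    tf.transpose(separated, perm=[0, 2, 1, 3]), (batch, point_batch, tokens, channels)
)
print(recombined.shape)  # (2, 3, 5, 8)
```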
294
+ class TFSamTwoWayAttentionBlock(keras.layers.Layer):
295
+ def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs):
296
+ """
297
+ A transformer block with four layers:
298
+ (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on
299
+ sparse inputs (4) cross attention of dense inputs -> sparse inputs
300
+
301
+ Arguments:
302
+ config (`SamMaskDecoderConfig`):
303
+ The configuration file used to instantiate the block
304
+ attention_downsample_rate (int, *optional*, defaults to 2):
305
+ The downsample ratio of the block used to reduce the inner dim of the attention.
306
+ skip_first_layer_pe (bool, *optional*, defaults to `False`):
307
+ Whether or not to skip the addition of the query_point_embedding on the first layer.
308
+ """
309
+ super().__init__(**kwargs)
310
+
311
+ self.hidden_size = config.hidden_size
312
+ self.layer_norm_eps = config.layer_norm_eps
313
+
314
+ self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn")
315
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1")
316
+
317
+ self.cross_attn_token_to_image = TFSamAttention(
318
+ config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image"
319
+ )
320
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2")
321
+
322
+ self.mlp = TFSamMLPBlock(config, name="mlp")
323
+ self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3")
324
+
325
+ self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4")
326
+ self.cross_attn_image_to_token = TFSamAttention(
327
+ config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token"
328
+ )
329
+
330
+ self.skip_first_layer_pe = skip_first_layer_pe
331
+
332
+ def call(
333
+ self,
334
+ queries: tf.Tensor,
335
+ keys: tf.Tensor,
336
+ query_point_embedding: tf.Tensor,
337
+ key_point_embedding: tf.Tensor,
338
+ output_attentions: bool = False,
339
+ ):
340
+ # Self attention block
341
+ if self.skip_first_layer_pe:
342
+ queries = self.self_attn(query=queries, key=queries, value=queries)
343
+ else:
344
+ query = queries + query_point_embedding
345
+ attn_out = self.self_attn(query=query, key=query, value=queries)
346
+ queries = queries + attn_out
347
+ queries = self.layer_norm1(queries)
348
+
349
+ # Cross attention block, tokens attending to image embedding
350
+ query = queries + query_point_embedding
351
+ key = keys + key_point_embedding
352
+
353
+ attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys)
354
+ queries = queries + attn_out
355
+
356
+ queries = self.layer_norm2(queries)
357
+
358
+ # MLP block
359
+ mlp_out = self.mlp(queries)
360
+ queries = queries + mlp_out
361
+ queries = self.layer_norm3(queries)
362
+
363
+ # Cross attention block, image embedding attending to tokens
364
+ query = queries + query_point_embedding
365
+ key = keys + key_point_embedding
366
+
367
+ attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
368
+ keys = keys + attn_out
369
+
370
+ keys = self.layer_norm4(keys)
371
+
372
+ outputs = (queries, keys)
373
+
374
+ if output_attentions:
375
+ outputs = outputs + (attn_out,)
376
+ else:
377
+ outputs = outputs + (None,)
378
+
379
+ return outputs
380
+
381
+ def build(self, input_shape=None):
382
+ if self.built:
383
+ return
384
+ self.built = True
385
+ if getattr(self, "self_attn", None) is not None:
386
+ with tf.name_scope(self.self_attn.name):
387
+ self.self_attn.build(None)
388
+ if getattr(self, "layer_norm1", None) is not None:
389
+ with tf.name_scope(self.layer_norm1.name):
390
+ self.layer_norm1.build([None, None, None, self.hidden_size])
391
+ if getattr(self, "cross_attn_token_to_image", None) is not None:
392
+ with tf.name_scope(self.cross_attn_token_to_image.name):
393
+ self.cross_attn_token_to_image.build(None)
394
+ if getattr(self, "layer_norm2", None) is not None:
395
+ with tf.name_scope(self.layer_norm2.name):
396
+ self.layer_norm2.build([None, None, None, self.hidden_size])
397
+ if getattr(self, "mlp", None) is not None:
398
+ with tf.name_scope(self.mlp.name):
399
+ self.mlp.build(None)
400
+ if getattr(self, "layer_norm3", None) is not None:
401
+ with tf.name_scope(self.layer_norm3.name):
402
+ self.layer_norm3.build([None, None, None, self.hidden_size])
403
+ if getattr(self, "layer_norm4", None) is not None:
404
+ with tf.name_scope(self.layer_norm4.name):
405
+ self.layer_norm4.build([None, None, None, self.hidden_size])
406
+ if getattr(self, "cross_attn_image_to_token", None) is not None:
407
+ with tf.name_scope(self.cross_attn_image_to_token.name):
408
+ self.cross_attn_image_to_token.build(None)
409
+
410
+
411
+ class TFSamTwoWayTransformer(keras.layers.Layer):
412
+ def __init__(self, config: SamMaskDecoderConfig, **kwargs):
413
+ super().__init__(**kwargs)
414
+ self.config = config
415
+
416
+ self.num_hidden_layers = config.num_hidden_layers
417
+ self.layers = []
418
+
419
+ for i in range(self.num_hidden_layers):
420
+ self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}"))
421
+
422
+ self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image")
423
+ self.layer_norm_final_attn = keras.layers.LayerNormalization(
424
+ epsilon=config.layer_norm_eps, name="layer_norm_final_attn"
425
+ )
426
+
427
+ def call(
428
+ self,
429
+ point_embeddings: tf.Tensor,
430
+ image_embeddings: tf.Tensor,
431
+ image_positional_embeddings: tf.Tensor,
432
+ output_attentions: Optional[bool] = None,
433
+ output_hidden_states: Optional[bool] = None,
434
+ return_dict: Optional[bool] = None,
435
+ ) -> Union[Tuple, TFBaseModelOutput]:
436
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
437
+ output_hidden_states = (
438
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
439
+ )
440
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
441
+
442
+ all_attentions = ()
443
+
444
+ if image_embeddings is None:
445
+ raise ValueError("You have to specify an image_embedding")
446
+
447
+ image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None]
448
+ image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None]
449
+
450
+ # Prepare queries
451
+ queries = point_embeddings
452
+ keys = image_embeddings
453
+
454
+ # Apply transformer blocks and final layernorm
455
+ for layer in self.layers:
456
+ queries, keys, attention_outputs = layer(
457
+ queries=queries,
458
+ keys=keys,
459
+ query_point_embedding=point_embeddings,
460
+ key_point_embedding=image_positional_embeddings,
461
+ output_attentions=output_attentions,
462
+ )
463
+
464
+ if output_attentions:
465
+ all_attentions = all_attentions + (attention_outputs,)
466
+
467
+ # Apply the final attention layer from the points to the image
468
+ query = queries + point_embeddings
469
+ key = keys + image_positional_embeddings
470
+
471
+ attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)
472
+
473
+ queries = queries + attn_out
474
+ queries = self.layer_norm_final_attn(queries)
475
+ return queries, keys, all_attentions
476
+
477
+ def build(self, input_shape=None):
478
+ if self.built:
479
+ return
480
+ self.built = True
481
+ if getattr(self, "final_attn_token_to_image", None) is not None:
482
+ with tf.name_scope(self.final_attn_token_to_image.name):
483
+ self.final_attn_token_to_image.build(None)
484
+ if getattr(self, "layer_norm_final_attn", None) is not None:
485
+ with tf.name_scope(self.layer_norm_final_attn.name):
486
+ self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size])
487
+ for layer in self.layers:
488
+ with tf.name_scope(layer.name):
489
+ layer.build(None)
490
+
491
+
492
+ class TFSamFeedForward(keras.layers.Layer):
493
+ def __init__(
494
+ self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs
495
+ ):
496
+ super().__init__(**kwargs)
497
+ self.num_layers = num_layers
498
+ self.activation = keras.layers.ReLU()
499
+ self.proj_in = keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in")
500
+ self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out")
501
+ self.layers = [
502
+ keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}")
503
+ for i in range(num_layers - 2)
504
+ ]
505
+ self.sigmoid_output = sigmoid_output
506
+ self.hidden_dim = hidden_dim
507
+ self.input_dim = input_dim
508
+
509
+ def call(self, hidden_states):
510
+ hidden_states = self.proj_in(hidden_states)
511
+ hidden_states = self.activation(hidden_states)
512
+ for layer in self.layers:
513
+ hidden_states = self.activation(layer(hidden_states))
514
+
515
+ hidden_states = self.proj_out(hidden_states)
516
+ if self.sigmoid_output:
517
+ hidden_states = tf.sigmoid(hidden_states)
518
+ return hidden_states
519
+
520
+ def build(self, input_shape=None):
521
+ if self.built:
522
+ return
523
+ self.built = True
524
+ if getattr(self, "proj_in", None) is not None:
525
+ with tf.name_scope(self.proj_in.name):
526
+ self.proj_in.build([None, None, self.input_dim])
527
+ if getattr(self, "proj_out", None) is not None:
528
+ with tf.name_scope(self.proj_out.name):
529
+ self.proj_out.build([None, None, self.hidden_dim])
530
+ if getattr(self, "layers", None) is not None:
531
+ for layer in self.layers:
532
+ with tf.name_scope(layer.name):
533
+ layer.build([None, None, self.hidden_dim])
534
+
535
+
536
+ class TFSamMaskDecoder(keras.layers.Layer):
537
+ def __init__(self, config: SamMaskDecoderConfig, **kwargs):
538
+ super().__init__(**kwargs)
539
+
540
+ self.hidden_size = config.hidden_size
541
+
542
+ self.num_multimask_outputs = config.num_multimask_outputs
543
+ self.num_mask_tokens = config.num_multimask_outputs + 1
544
+
545
+ self.transformer = TFSamTwoWayTransformer(config, name="transformer")
546
+
547
+ self.upscale_conv1 = keras.layers.Conv2DTranspose(
548
+ self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first"
549
+ )
550
+ self.upscale_conv2 = keras.layers.Conv2DTranspose(
551
+ self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first"
552
+ )
553
+ self.upscale_layer_norm = TFSamLayerNorm(
554
+ self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm"
555
+ )
556
+ self.activation = tf.nn.gelu
557
+
558
+ mlps_list = []
559
+ for i in range(self.num_mask_tokens):
560
+ mlps_list += [
561
+ TFSamFeedForward(
562
+ self.hidden_size,
563
+ self.hidden_size,
564
+ self.hidden_size // 8,
565
+ 3,
566
+ name=f"output_hypernetworks_mlps_._{i}",
567
+ )
568
+ ]
569
+ self.output_hypernetworks_mlps = mlps_list
570
+
571
+ self.iou_prediction_head = TFSamFeedForward(
572
+ self.hidden_size,
573
+ config.iou_head_hidden_dim,
574
+ self.num_mask_tokens,
575
+ config.iou_head_depth,
576
+ name="iou_prediction_head",
577
+ )
578
+
579
+ def build(self, input_shape=None):
580
+ if self.built:
581
+ return
582
+ self.built = True
583
+ self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True)
584
+ self.mask_tokens = self.add_weight(
585
+ shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True
586
+ )
587
+
588
+ if getattr(self, "transformer", None) is not None:
589
+ with tf.name_scope(self.transformer.name):
590
+ self.transformer.build(None)
591
+ if getattr(self, "upscale_conv1", None) is not None:
592
+ with tf.name_scope(self.upscale_conv1.name):
593
+ self.upscale_conv1.build([None, self.hidden_size, None, None])
594
+ if getattr(self, "upscale_conv2", None) is not None:
595
+ with tf.name_scope(self.upscale_conv2.name):
596
+ self.upscale_conv2.build([None, self.hidden_size // 4, None, None])
597
+ if getattr(self, "upscale_layer_norm", None) is not None:
598
+ with tf.name_scope(self.upscale_layer_norm.name):
599
+ self.upscale_layer_norm.build(None)
600
+ if getattr(self, "iou_prediction_head", None) is not None:
601
+ with tf.name_scope(self.iou_prediction_head.name):
602
+ self.iou_prediction_head.build(None)
603
+ for mlp in self.output_hypernetworks_mlps:
604
+ with tf.name_scope(mlp.name):
605
+ mlp.build(None)
606
+
607
+ def call(
608
+ self,
609
+ image_embeddings: tf.Tensor,
610
+ image_positional_embeddings: tf.Tensor,
611
+ sparse_prompt_embeddings: tf.Tensor,
612
+ dense_prompt_embeddings: tf.Tensor,
613
+ multimask_output: bool,
614
+ output_attentions: Optional[bool] = None,
615
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
616
+ batch_size, num_channels, height, width = shape_list(image_embeddings)
617
+ point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1])
618
+
619
+ output_tokens = tf.concat([self.iou_token, self.mask_tokens], axis=0) # Should be (1, 32) + (4, 32) = (5, 32)
620
+ output_tokens = tf.tile(
621
+ output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1]
622
+ ) # Should be (batch_size, point_size, 5, 32)
623
+
624
+ # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only
625
+ # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced
626
+ # it with an explicit shape check to avoid data-dependent control flow which breaks XLA.
627
+ if shape_list(sparse_prompt_embeddings)[1] != 0:
628
+ tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2)
629
+ else:
630
+ tokens = output_tokens
631
+ point_embeddings = tf.cast(tokens, self.iou_token.dtype)
632
+
633
+ image_embeddings = image_embeddings + dense_prompt_embeddings
634
+ image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0)
635
+ image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0)
636
+
637
+ point_embedding, image_embeddings, attentions = self.transformer(
638
+ point_embeddings=point_embeddings,
639
+ image_embeddings=image_embeddings,
640
+ image_positional_embeddings=image_positional_embeddings,
641
+ output_attentions=output_attentions,
642
+ )
643
+ iou_token_out = point_embedding[:, :, 0, :]
644
+ mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]
645
+
646
+ image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2))
647
+ image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width])
648
+
649
+ upscaled_embedding = self.upscale_conv1(image_embeddings)
650
+ upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
651
+ upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))
652
+
653
+ hyper_in_list = []
654
+ for i in range(self.num_mask_tokens):
655
+ current_mlp = self.output_hypernetworks_mlps[i]
656
+ hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
657
+ hyper_in = tf.stack(hyper_in_list, axis=2)
658
+
659
+ _, num_channels, height, width = shape_list(upscaled_embedding)
660
+ upscaled_embedding = tf.reshape(
661
+ upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width]
662
+ )
663
+ masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width])
664
+
665
+ iou_pred = self.iou_prediction_head(iou_token_out)
666
+
667
+ if multimask_output:
668
+ mask_slice = slice(1, None)
669
+ else:
670
+ mask_slice = slice(0, 1)
671
+ masks = masks[:, :, mask_slice, :, :]
672
+ iou_pred = iou_pred[:, :, mask_slice]
673
+
674
+ outputs = (masks, iou_pred)
675
+
676
+ if output_attentions:
677
+ outputs = outputs + (attentions,)
678
+ else:
679
+ outputs = outputs + (None,)
680
+
681
+ return outputs
682
+
683
+
684
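The `mask_slice` logic at the end of `call` above is easy to misread: token 0 is the single-mask output and tokens 1..3 are the multimask outputs. A tiny shape-only sketch (sizes are illustrative):

```python
import numpy as np

num_mask_tokens = 4  # 1 single-mask token + num_multimask_outputs (3) tokens
masks = np.zeros((1, 1, num_mask_tokens, 8, 8))  # (batch, point_batch, tokens, h, w)

multimask = masks[:, :, slice(1, None), :, :]  # multimask_output=True  -> 3 masks
single = masks[:, :, slice(0, 1), :, :]        # multimask_output=False -> 1 "best" mask
print(multimask.shape, single.shape)           # (1, 1, 3, 8, 8) (1, 1, 1, 8, 8)
```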
+ class TFSamPositionalEmbedding(keras.layers.Layer):
685
+ def __init__(self, config, **kwargs):
686
+ super().__init__(**kwargs)
687
+ self.scale = config.hidden_size // 2
688
+ self.config = config
689
+
690
+ def build(self, input_shape):
691
+ # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized?
692
+ self.positional_embedding = self.add_weight(
693
+ name="positional_embedding",
694
+ shape=(2, self.config.num_pos_feats),
695
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale),
696
+ trainable=False,
697
+ )
698
+ super().build(input_shape)
699
+
700
+ def call(self, input_coords, input_shape=None):
701
+ """Positionally encode points that are normalized to [0,1]."""
702
+ coordinates = tf.identity(input_coords)
703
+
704
+ if input_shape is not None:
705
+ coordinates = tf.stack(
706
+ [
707
+ tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1],
708
+ tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0],
709
+ ],
710
+ axis=-1,
711
+ )
712
+
713
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
714
+ coordinates = 2 * coordinates - 1
715
+ coordinates = tf.cast(coordinates, self.positional_embedding.dtype)
716
+ coordinates = tf.matmul(coordinates, self.positional_embedding)
717
+ coordinates = 2 * np.pi * coordinates
718
+ # outputs d_1 x ... x d_n x channel shape
719
+ return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1)
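A minimal standalone sketch of the encoding scheme implemented above (a fixed random Gaussian projection followed by sine/cosine features), with made-up sizes:

    import numpy as np
    import tensorflow as tf

    num_pos_feats, scale = 128, 64.0                      # made-up sizes
    projection = tf.random.normal((2, num_pos_feats), stddev=scale)

    coords = tf.constant([[0.25, 0.75]])                  # (x, y) already normalized to [0, 1]
    coords = 2.0 * coords - 1.0                           # map to [-1, 1]
    angles = 2.0 * np.pi * tf.matmul(coords, projection)  # project, then scale by 2*pi
    encoding = tf.concat([tf.sin(angles), tf.cos(angles)], axis=-1)  # shape (1, 2 * num_pos_feats)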
720
+
721
+
722
+ class TFSamMaskEmbedding(keras.layers.Layer):
723
+ def __init__(self, config: SamPromptEncoderConfig, **kwargs):
724
+ super().__init__(**kwargs)
725
+ self.mask_input_channels = config.mask_input_channels // 4
726
+ self.activation = ACT2FN[config.hidden_act]
727
+ self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1")
728
+ self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2")
729
+ self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3")
730
+ self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1")
731
+ self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2")
732
+ self.config = config
733
+
734
+ def call(self, masks):
735
+ masks = tf.transpose(masks, perm=(0, 2, 3, 1)) # Convert to channels-last
736
+ hidden_states = self.conv1(masks)
737
+ hidden_states = self.layer_norm1(hidden_states)
738
+ hidden_states = self.activation(hidden_states)
739
+
740
+ hidden_states = self.conv2(hidden_states)
741
+ hidden_states = self.layer_norm2(hidden_states)
742
+ hidden_states = self.activation(hidden_states)
743
+ dense_embeddings = self.conv3(hidden_states)
744
+ dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2)) # Convert back to channels-first
745
+ return dense_embeddings
746
+
747
+ def build(self, input_shape=None):
748
+ # This class needs an explicit build method because it isn't called with the standard dummy inputs
749
+ if self.built:
750
+ return
751
+ self.built = True
752
+ with tf.name_scope("conv1"):
753
+ self.conv1.build([None, None, None, 1])
754
+ with tf.name_scope("conv2"):
755
+ self.conv2.build([None, None, None, self.mask_input_channels])
756
+ with tf.name_scope("conv3"):
757
+ self.conv3.build([None, None, None, self.mask_input_channels * 4])
758
+ with tf.name_scope("layer_norm1"):
759
+ self.layer_norm1.build([None, None, None, self.mask_input_channels])
760
+ with tf.name_scope("layer_norm2"):
761
+ self.layer_norm2.build([None, None, None, self.mask_input_channels * 4])
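A quick shape check of the two stride-2 convolutions above, with illustrative sizes (a 256x256 input mask ends up on a 64x64 grid):

    import tensorflow as tf
    from tensorflow import keras

    mask = tf.zeros((1, 256, 256, 1))                          # channels-last toy mask
    x = keras.layers.Conv2D(4, kernel_size=2, strides=2)(mask)
    x = keras.layers.Conv2D(16, kernel_size=2, strides=2)(x)
    print(x.shape)                                             # (1, 64, 64, 16): 4x spatial downscale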
762
+
763
+
764
+ class TFSamPromptEncoder(keras.layers.Layer):
765
+ def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs):
766
+ super().__init__(**kwargs)
767
+ self.shared_embedding = shared_patch_embedding
768
+ self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed")
769
+ self.no_mask_embed = None
770
+
771
+ self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size)
772
+ self.input_image_size = config.image_size
773
+
774
+ self.point_embed = []
775
+ self.hidden_size = config.hidden_size
776
+ self.not_a_point_embed = None
777
+ self.config = config
778
+
779
+ def build(self, input_shape=None):
780
+ self.no_mask_embed = self.add_weight(
781
+ name="no_mask_embed.weight",
782
+ shape=(1, self.hidden_size),
783
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
784
+ trainable=True,
785
+ )
786
+ self.point_embed = [
787
+ self.add_weight(
788
+ name=f"point_embed_._{i}.weight",
789
+ shape=(1, self.hidden_size),
790
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
791
+ trainable=True,
792
+ )
793
+ for i in range(self.config.num_point_embeddings)
794
+ ]
795
+ self.not_a_point_embed = self.add_weight(
796
+ name="not_a_point_embed.weight",
797
+ shape=(1, self.hidden_size),
798
+ initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02),
799
+ trainable=True,
800
+ )
801
+ with tf.name_scope("mask_embed"):
802
+ # We must explicitly build the mask embed because it isn't touched by the standard dummy inputs
803
+ self.mask_embed.build(
804
+ (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size)
805
+ )
806
+
807
+ if self.built:
808
+ return
809
+ self.built = True
810
+ if getattr(self, "mask_embed", None) is not None:
811
+ with tf.name_scope(self.mask_embed.name):
812
+ self.mask_embed.build(None)
813
+
814
+ def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor:
815
+ """Embeds point prompts."""
816
+ points = points + 0.5 # Shift to center of pixel
817
+ if pad:
818
+ target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1])
819
+ target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1)
820
+ padding_point = tf.zeros(target_point_shape, dtype=points.dtype)
821
+ padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype)
822
+ points = tf.concat([points, padding_point], axis=2)
823
+ labels = tf.concat([labels, padding_label], axis=2)
824
+ input_shape = (self.input_image_size, self.input_image_size)
825
+ point_embedding = self.shared_embedding(points, input_shape)
826
+
827
+ point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding)
828
+
829
+ point_embedding = tf.where(
830
+ labels[..., None] != -10,
831
+ point_embedding,
832
+ tf.zeros_like(point_embedding),
833
+ )
834
+ point_embedding = tf.where(
835
+ (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding
836
+ )
837
+ point_embedding = tf.where(
838
+ (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding
839
+ )
840
+ return point_embedding
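A small sketch of the broadcasting trick used above: expanding the integer labels with `[..., None]` lets `tf.where` choose a per-point embedding along the hidden dimension (toy shapes, stand-in vectors):

    import tensorflow as tf

    labels = tf.constant([[[1, 0, -1, -10]]])          # (batch, point_batch, num_points)
    embeddings = tf.ones((1, 1, 4, 8))                 # toy point embeddings, hidden size 8
    not_a_point = tf.fill((8,), 5.0)                   # stand-in for a learned vector

    # Points labelled -1 receive the "not a point" vector; all others keep their embedding.
    embeddings = tf.where(labels[..., None] == -1, not_a_point, embeddings)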
841
+
842
+ def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor:
843
+ """Embeds box prompts."""
844
+ boxes = boxes + 0.5 # Shift to center of pixel
845
+ batch_size, nb_boxes = shape_list(boxes)[:2]
846
+ coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2))
847
+ input_shape = (self.input_image_size, self.input_image_size)
848
+ corner_embedding = self.shared_embedding(coords, input_shape)
849
+ corner_embedding += tf.where(
850
+ tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0,
851
+ self.point_embed[2][0],
852
+ self.point_embed[3][0],
853
+ )
854
+ return corner_embedding
855
+
856
+ def call(
857
+ self,
858
+ batch_size: Optional[int],
859
+ input_points: Optional[Tuple[tf.Tensor, tf.Tensor]],
860
+ input_labels: tf.Tensor | None,
861
+ input_boxes: tf.Tensor | None,
862
+ input_masks: tf.Tensor | None,
863
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
864
+ """
865
+ Embeds different types of prompts, returning both sparse and dense embeddings.
866
+
867
+ Args:
868
+ input_points (`tf.Tensor`, *optional*):
869
+ point coordinates and labels to embed.
870
+ input_boxes (`tf.Tensor`, *optional*):
871
+ boxes to embed
872
+ input_masks (`tf.Tensor`, *optional*):
873
+ masks to embed
874
+ """
875
+ sparse_embeddings = None
876
+ if input_points is not None:
877
+ batch_size, point_batch_size = shape_list(input_points)[:2]
878
+ if input_labels is None:
879
+ raise ValueError("If points are provided, labels must also be provided.")
880
+ point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
881
+ sparse_embeddings = tf.zeros(
882
+ (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype
883
+ )
884
+ sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2)
885
+ if input_boxes is not None:
886
+ batch_size = shape_list(input_boxes)[0]
887
+ box_embeddings = self._embed_boxes(input_boxes)
888
+ if sparse_embeddings is None:
889
+ sparse_embeddings = box_embeddings
890
+ else:
891
+ sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2)
892
+ if input_masks is not None:
893
+ dense_embeddings = self.mask_embed(input_masks)
894
+ else:
895
+ dense_embeddings = self.no_mask_embed[0]
896
+ dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1))
897
+ dense_embeddings = tf.tile(
898
+ dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1])
899
+ )
900
+ if sparse_embeddings is None:
901
+ sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype)
902
+
903
+ return sparse_embeddings, dense_embeddings
904
+
905
+
906
+ class TFSamVisionAttention(keras.layers.Layer):
907
+ """Multi-head Attention block with relative position embeddings."""
908
+
909
+ def __init__(self, config, window_size, **kwargs):
910
+ super().__init__(**kwargs)
911
+ input_size = (
912
+ (config.image_size // config.patch_size, config.image_size // config.patch_size)
913
+ if window_size == 0
914
+ else (window_size, window_size)
915
+ )
916
+ self.input_size = input_size
917
+
918
+ self.num_attention_heads = config.num_attention_heads
919
+ head_dim = config.hidden_size // config.num_attention_heads
920
+ self.head_dim = head_dim
921
+ self.scale = head_dim**-0.5
922
+ self.dropout = config.attention_dropout
923
+
924
+ self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv")
925
+ self.proj = keras.layers.Dense(config.hidden_size, name="proj")
926
+
927
+ self.use_rel_pos = config.use_rel_pos
928
+ if self.use_rel_pos:
929
+ if input_size is None:
930
+ raise ValueError("Input size must be provided if using relative positional encoding.")
931
+ self.config = config
932
+
933
+ def build(self, input_shape=None):
934
+ if self.input_size is not None:
935
+ # initialize relative positional embeddings
936
+ self.rel_pos_h = self.add_weight(
937
+ shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h"
938
+ )
939
+ self.rel_pos_w = self.add_weight(
940
+ shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w"
941
+ )
942
+
943
+ if self.built:
944
+ return
945
+ self.built = True
946
+ if getattr(self, "qkv", None) is not None:
947
+ with tf.name_scope(self.qkv.name):
948
+ self.qkv.build([None, None, self.config.hidden_size])
949
+ if getattr(self, "proj", None) is not None:
950
+ with tf.name_scope(self.proj.name):
951
+ self.proj.build([None, None, self.config.hidden_size])
952
+
953
+ def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor:
954
+ """
955
+ Get relative positional embeddings according to the relative positions of
956
+ query and key sizes.
957
+
958
+ Args:
959
+ q_size (int):
960
+ size of the query.
961
+ k_size (int):
962
+ size of key k.
963
+ rel_pos (`tf.Tensor`):
964
+ relative position embeddings (L, channel).
965
+
966
+ Returns:
967
+ Extracted positional embeddings according to relative positions.
968
+ """
969
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
970
+ # Interpolate rel pos if needed.
971
+ if rel_pos.shape[0] != max_rel_dist:
972
+ # Interpolate rel pos.
973
+ rel_pos_resized = tf.image.resize(
974
+ tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)),
975
+ size=(max_rel_dist, rel_pos.shape[1]),
976
+ method="bilinear",
977
+ )
978
+ rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist))
979
+ else:
980
+ rel_pos_resized = rel_pos
981
+
982
+ # Scale the coords with short length if shapes for q and k are different.
983
+ q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0)
984
+ k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0)
985
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
986
+
987
+ return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32))
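For intuition, a tiny example of the relative-coordinate computation above in the common case where query and key sizes match (here 3): no interpolation or rescaling is needed, and the resulting indices cover every row of a (2 * size - 1, head_dim) table.

    import tensorflow as tf

    q_size = k_size = 3
    q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1)   # column vector 0..2
    k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0)   # row vector 0..2
    relative_coords = (q_coords - k_coords) + (k_size - 1)
    print(tf.cast(relative_coords, tf.int32).numpy())
    # [[2 1 0]
    #  [3 2 1]
    #  [4 3 2]]  -> row indices into the (2 * 3 - 1, head_dim) embedding table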
988
+
989
+ def add_decomposed_rel_pos(
990
+ self,
991
+ attn: tf.Tensor,
992
+ query: tf.Tensor,
993
+ rel_pos_h: tf.Tensor,
994
+ rel_pos_w: tf.Tensor,
995
+ q_size: Tuple[int, int],
996
+ k_size: Tuple[int, int],
997
+ ) -> tf.Tensor:
998
+ """
999
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
1000
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
1001
+
1002
+ Args:
1003
+ attn (`tf.Tensor`):
1004
+ attention map.
1005
+ query (`tf.Tensor`):
1006
+ query q in the attention layer with shape (batch_size, query_height * query_width, channel).
1007
+ rel_pos_h (`tf.Tensor`):
1008
+ relative position embeddings (Lh, channel) for height axis.
1009
+ rel_pos_w (`tf.Tensor`):
1010
+ relative position embeddings (Lw, channel) for width axis.
1011
+ q_size (tuple):
1012
+ spatial sequence size of query q with (query_height, query_width).
1013
+ k_size (tuple):
1014
+ spatial sequence size of key k with (key_height, key_width).
1015
+
1016
+ Returns:
1017
+ attn (`tf.Tensor`):
1018
+ attention map with added relative positional embeddings.
1019
+ """
1020
+ query_height, query_width = q_size
1021
+ key_height, key_width = k_size
1022
+ relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
1023
+ relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)
1024
+
1025
+ batch_size, _, dim = shape_list(query)
1026
+ reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim))
1027
+ rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height)
1028
+ rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width)
1029
+ attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width))
1030
+ attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2)
1031
+ attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width))
1032
+ return attn
1033
+
1034
+ def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor:
1035
+ batch_size, height, width, _ = shape_list(hidden_states)
1036
+ # qkv with shape (3, batch_size, nHead, height * width, channel)
1037
+ qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1))
1038
+ qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4))
1039
+ # q, k, v with shape (batch_size * nHead, height * width, channel)
1040
+ query, key, value = tf.unstack(
1041
+ tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0
1042
+ )
1043
+ attn_weights = tf.matmul(query * self.scale, key, transpose_b=True)
1044
+
1045
+ if self.use_rel_pos:
1046
+ attn_weights = self.add_decomposed_rel_pos(
1047
+ attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
1048
+ )
1049
+
1050
+ attn_weights = tf.nn.softmax(attn_weights, axis=-1)
1051
+
1052
+ if training:
1053
+ attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout)
1054
+ else:
1055
+ attn_probs = attn_weights
1056
+
1057
+ attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1))
1058
+ attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4))
1059
+ attn_output = tf.reshape(attn_output, (batch_size, height, width, self.config.hidden_size))
1060
+
1061
+ attn_output = self.proj(attn_output)
1062
+
1063
+ if output_attentions:
1064
+ outputs = (attn_output, attn_weights)
1065
+ else:
1066
+ outputs = (attn_output, None)
1067
+
1068
+ return outputs
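A short sketch of the fused qkv split used above, with toy sizes: a single dense projection yields 3 * num_heads * head_dim channels, which are reshaped and unstacked into per-head query, key and value tensors.

    import tensorflow as tf

    batch, tokens, heads, head_dim = 2, 16, 4, 8
    qkv = tf.random.normal((batch, tokens, 3 * heads * head_dim))   # fused projection output
    qkv = tf.reshape(qkv, (batch, tokens, 3, heads, head_dim))
    qkv = tf.transpose(qkv, (2, 0, 3, 1, 4))                        # (3, batch, heads, tokens, head_dim)
    query, key, value = tf.unstack(
        tf.reshape(qkv, (3, batch * heads, tokens, head_dim)), axis=0
    )
    print(query.shape)                                              # (8, 16, 8)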
1069
+
1070
+
1071
+ class TFSamVisionLayer(keras.layers.Layer):
1072
+ def __init__(self, config, window_size, **kwargs):
1073
+ super().__init__(**kwargs)
1074
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
1075
+ self.attn = TFSamVisionAttention(config, window_size, name="attn")
1076
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
1077
+ self.mlp = TFSamMLPBlock(config, name="mlp")
1078
+ self.window_size = window_size
1079
+ self.config = config
1080
+
1081
+ def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]:
1082
+ batch_size, height, width, channel = shape_list(hidden_states)
1083
+
1084
+ pad_h = (window_size - height % window_size) % window_size
1085
+ pad_w = (window_size - width % window_size) % window_size
1086
+ if pad_h > 0 or pad_w > 0:
1087
+ hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]])
1088
+ pad_height, pad_width = height + pad_h, width + pad_w
1089
+
1090
+ hidden_states = tf.reshape(
1091
+ hidden_states,
1092
+ [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel],
1093
+ )
1094
+ windows = tf.reshape(
1095
+ tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel]
1096
+ )
1097
+ return windows, (pad_height, pad_width)
1098
+
1099
+ def window_unpartition(
1100
+ self, windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]
1101
+ ) -> tf.Tensor:
1102
+ pad_height, pad_width = padding_shape
1103
+ height, width = original_shape
1104
+ batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size)
1105
+ hidden_states = tf.reshape(
1106
+ windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1]
1107
+ )
1108
+ hidden_states = tf.reshape(
1109
+ tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1]
1110
+ )
1111
+
1112
+ if pad_height > height or pad_width > width:
1113
+ hidden_states = hidden_states[:, :height, :width, :]
1114
+ return hidden_states
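A quick shape sketch of the window partition above, with toy sizes chosen so that no padding is needed (a 14x14 feature map and window size 7 give 2x2 = 4 windows):

    import tensorflow as tf

    x = tf.random.normal((1, 14, 14, 8))    # (batch, height, width, channels)
    w = 7
    windows = tf.reshape(x, (1, 14 // w, w, 14 // w, w, 8))
    windows = tf.reshape(tf.transpose(windows, (0, 1, 3, 2, 4, 5)), (-1, w, w, 8))
    print(windows.shape)                     # (4, 7, 7, 8): one entry per window for local attention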
1115
+
1116
+ def call(
1117
+ self,
1118
+ hidden_states: tf.Tensor,
1119
+ output_attentions: Optional[bool] = False,
1120
+ training: Optional[bool] = False,
1121
+ ) -> Tuple[tf.Tensor]:
1122
+ residual = hidden_states
1123
+
1124
+ hidden_states = self.layer_norm1(hidden_states)
1125
+ if self.window_size > 0:
1126
+ height, width = hidden_states.shape[1], hidden_states.shape[2]
1127
+ hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)
1128
+
1129
+ hidden_states, attn_weights = self.attn(
1130
+ hidden_states=hidden_states,
1131
+ output_attentions=output_attentions,
1132
+ training=training,
1133
+ )
1134
+ if self.window_size > 0:
1135
+ hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))
1136
+
1137
+ hidden_states = residual + hidden_states
1138
+ layernorm_output = self.layer_norm2(hidden_states)
1139
+ hidden_states = hidden_states + self.mlp(layernorm_output)
1140
+
1141
+ outputs = (hidden_states,)
1142
+ if output_attentions:
1143
+ outputs += (attn_weights,)
1144
+
1145
+ return outputs
1146
+
1147
+ def build(self, input_shape=None):
1148
+ if self.built:
1149
+ return
1150
+ self.built = True
1151
+ if getattr(self, "layer_norm1", None) is not None:
1152
+ with tf.name_scope(self.layer_norm1.name):
1153
+ self.layer_norm1.build([None, None, None, self.config.hidden_size])
1154
+ if getattr(self, "attn", None) is not None:
1155
+ with tf.name_scope(self.attn.name):
1156
+ self.attn.build(None)
1157
+ if getattr(self, "layer_norm2", None) is not None:
1158
+ with tf.name_scope(self.layer_norm2.name):
1159
+ self.layer_norm2.build([None, None, None, self.config.hidden_size])
1160
+ if getattr(self, "mlp", None) is not None:
1161
+ with tf.name_scope(self.mlp.name):
1162
+ self.mlp.build(None)
1163
+
1164
+
1165
+ class TFSamVisionNeck(keras.layers.Layer):
1166
+ def __init__(self, config: SamVisionConfig, **kwargs):
1167
+ super().__init__(**kwargs)
1168
+ self.config = config
1169
+
1170
+ self.conv1 = keras.layers.Conv2D(
1171
+ config.output_channels,
1172
+ kernel_size=1,
1173
+ use_bias=False,
1174
+ name="conv1",
1175
+ )
1176
+ self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1")
1177
+ self.conv2 = keras.layers.Conv2D(
1178
+ config.output_channels,
1179
+ kernel_size=3,
1180
+ padding="same",
1181
+ use_bias=False,
1182
+ name="conv2",
1183
+ )
1184
+ self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2")
1185
+
1186
+ def call(self, hidden_states):
1187
+ hidden_states = self.conv1(hidden_states)
1188
+ hidden_states = self.layer_norm1(hidden_states)
1189
+
1190
+ hidden_states = self.conv2(hidden_states)
1191
+ hidden_states = self.layer_norm2(hidden_states)
1192
+ hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2])
1193
+ return hidden_states
1194
+
1195
+ def build(self, input_shape=None):
1196
+ if self.built:
1197
+ return
1198
+ self.built = True
1199
+ if getattr(self, "conv1", None) is not None:
1200
+ with tf.name_scope(self.conv1.name):
1201
+ self.conv1.build([None, None, None, self.config.hidden_size])
1202
+ if getattr(self, "layer_norm1", None) is not None:
1203
+ with tf.name_scope(self.layer_norm1.name):
1204
+ self.layer_norm1.build(None)
1205
+ if getattr(self, "conv2", None) is not None:
1206
+ with tf.name_scope(self.conv2.name):
1207
+ self.conv2.build([None, None, None, self.config.output_channels])
1208
+ if getattr(self, "layer_norm2", None) is not None:
1209
+ with tf.name_scope(self.layer_norm2.name):
1210
+ self.layer_norm2.build(None)
1211
+
1212
+
1213
+ class TFSamVisionEncoder(keras.layers.Layer):
1214
+ def __init__(self, config: SamVisionConfig, **kwargs):
1215
+ super().__init__(**kwargs)
1216
+ self.config = config
1217
+ self.image_size = config.image_size
1218
+
1219
+ self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed")
1220
+
1221
+ self.pos_embed = None
1222
+
1223
+ self.layers = []
1224
+ for i in range(config.num_hidden_layers):
1225
+ layer = TFSamVisionLayer(
1226
+ config,
1227
+ window_size=config.window_size if i not in config.global_attn_indexes else 0,
1228
+ name=f"layers_._{i}",
1229
+ )
1230
+ self.layers.append(layer)
1231
+
1232
+ self.neck = TFSamVisionNeck(config, name="neck")
1233
+
1234
+ def build(self, input_shape=None):
1235
+ if self.built:
1236
+ return
1237
+ self.built = True
1238
+ if self.config.use_abs_pos:
1239
+ # Initialize absolute positional embedding with pretrain image size.
1240
+ self.pos_embed = self.add_weight(
1241
+ shape=[
1242
+ 1,
1243
+ self.config.image_size // self.config.patch_size,
1244
+ self.config.image_size // self.config.patch_size,
1245
+ self.config.hidden_size,
1246
+ ],
1247
+ initializer="zeros",
1248
+ trainable=True,
1249
+ name="pos_embed",
1250
+ )
1251
+
1252
+ if getattr(self, "patch_embed", None) is not None:
1253
+ with tf.name_scope(self.patch_embed.name):
1254
+ self.patch_embed.build(None)
1255
+ if getattr(self, "neck", None) is not None:
1256
+ with tf.name_scope(self.neck.name):
1257
+ self.neck.build(None)
1258
+ for layer in self.layers:
1259
+ with tf.name_scope(layer.name):
1260
+ layer.build(None)
1261
+
1262
+ def get_input_embeddings(self):
1263
+ return self.patch_embed
1264
+
1265
+ def call(
1266
+ self,
1267
+ pixel_values: tf.Tensor | None = None,
1268
+ output_attentions: Optional[bool] = None,
1269
+ output_hidden_states: Optional[bool] = None,
1270
+ return_dict: Optional[bool] = None,
1271
+ training: Optional[bool] = False,
1272
+ ) -> Union[Tuple, TFSamVisionEncoderOutput]:
1273
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1274
+ output_hidden_states = (
1275
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1276
+ )
1277
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1278
+
1279
+ if pixel_values is None:
1280
+ raise ValueError("You have to specify pixel_values")
1281
+
1282
+ hidden_states = self.patch_embed(pixel_values)
1283
+ if self.pos_embed is not None:
1284
+ hidden_states = hidden_states + self.pos_embed
1285
+
1286
+ all_hidden_states = () if output_hidden_states else None
1287
+ all_self_attentions = () if output_attentions else None
1288
+
1289
+ for i, layer_module in enumerate(self.layers):
1290
+ if output_hidden_states:
1291
+ all_hidden_states = all_hidden_states + (hidden_states,)
1292
+
1293
+ layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training)
1294
+
1295
+ hidden_states = layer_outputs[0]
1296
+
1297
+ if output_attentions:
1298
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
1299
+
1300
+ if output_hidden_states:
1301
+ all_hidden_states = all_hidden_states + (hidden_states,)
1302
+
1303
+ hidden_states = self.neck(hidden_states)
1304
+
1305
+ if not return_dict:
1306
+ outputs = (hidden_states,)
1307
+ if output_hidden_states:
1308
+ outputs = outputs + (all_hidden_states,)
1309
+ if output_attentions:
1310
+ outputs = outputs + (all_self_attentions,)
1311
+ return outputs
1312
+
1313
+ return TFSamVisionEncoderOutput(
1314
+ last_hidden_state=hidden_states,
1315
+ hidden_states=all_hidden_states,
1316
+ attentions=all_self_attentions,
1317
+ )
1318
+
1319
+
1320
+ class TFSamPreTrainedModel(TFPreTrainedModel):
1321
+ config_class = SamConfig
1322
+ base_model_prefix = "sam"
1323
+ main_input_name = "pixel_values"
1324
+
1325
+
1326
+ SAM_START_DOCSTRING = r"""
1327
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1328
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1329
+ etc.)
1330
+
1331
+ This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
1332
+ subclass. Use it as a regular TensorFlow Model and refer to the TensorFlow documentation for all matters related to
1333
+ general usage and behavior.
1334
+
1335
+ Parameters:
1336
+ config ([`SamConfig`]): Model configuration class with all the parameters of the model.
1337
+ Initializing with a config file does not load the weights associated with the model, only the
1338
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
1339
+ """
1340
+
1341
+
1342
+ SAM_INPUTS_DOCSTRING = r"""
1343
+ Args:
1344
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
1345
+ Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for
1346
+ details.
1347
+ input_points (`tf.Tensor` of shape `(batch_size, num_points, 2)`):
1348
+ Input 2D spatial points; these are used by the prompt encoder to encode the prompt. Generally yields much
1349
+ better results. The points can be obtained by passing a list of lists of lists to the processor that will
1350
+ create corresponding `tf` tensors of dimension 4. The first dimension is the image batch size, the second
1351
+ dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per
1352
+ input point), the third dimension is the number of points per segmentation mask (it is possible to pass
1353
+ multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
1354
+ coordinates of the point. If a different number of points is passed either for each image, or for each
1355
+ mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
1356
+ computation of the embedding will be skipped for these points using the labels.
1357
+ input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points)`):
1358
+ Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
1359
+ official implementation, there are 3 types of labels
1360
+
1361
+ - `1`: the point is a point that contains the object of interest
1362
+ - `0`: the point is a point that does not contain the object of interest
1363
+ - `-1`: the point corresponds to the background
1364
+
1365
+ We added the label:
1366
+
1367
+ - `-10`: the point is a padding point, thus should be ignored by the prompt encoder
1368
+
1369
+ The padding labels should be automatically done by the processor.
1370
+ input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes, 4)`):
1371
+ Input boxes for the points; these are used by the prompt encoder to encode the prompt. Generally yields
1372
+ much better generated masks. The boxes can be obtained by passing a list of lists of lists to the processor,
1373
+ which will generate a `tf` tensor, with each dimension corresponding respectively to the image batch size,
1374
+ the number of boxes per image and the coordinates of the top left and bottom right point of the box. In the
1375
+ order (`x1`, `y1`, `x2`, `y2`):
1376
+
1377
+ - `x1`: the x coordinate of the top left point of the input box
1378
+ - `y1`: the y coordinate of the top left point of the input box
1379
+ - `x2`: the x coordinate of the bottom right point of the input box
1380
+ - `y2`: the y coordinate of the bottom right point of the input box
1381
+
1382
+ input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
1383
+ The SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
1384
+ generate a corresponding embedding that will be fed later on to the mask decoder. These masks need to be
1385
+ manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
1386
+
1387
+ image_embeddings (`tf.Tensor` of shape `(batch_size, output_channels, window_size, window_size)`):
1388
+ Image embeddings; these are used by the mask decoder to generate masks and IoU scores. For more memory
1389
+ efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
1390
+ method, and then feed them to the `call` method instead of feeding the `pixel_values`.
1391
+ multimask_output (`bool`, *optional*):
1392
+ In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
1393
+ bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
1394
+ "best" mask, by specifying `multimask_output=False`.
1395
+ output_attentions (`bool`, *optional*):
1396
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1397
+ tensors for more detail.
1398
+ output_hidden_states (`bool`, *optional*):
1399
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1400
+ more detail.
1401
+ return_dict (`bool`, *optional*):
1402
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1403
+ """
1404
+
1405
+
1406
+ @add_start_docstrings(
1407
+ "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ",
1408
+ " optional 2D location and bounding boxes.",
1409
+ SAM_START_DOCSTRING,
1410
+ )
1411
+ class TFSamModel(TFSamPreTrainedModel):
1412
+ _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"]
1413
+
1414
+ def __init__(self, config, **kwargs):
1415
+ super().__init__(config, **kwargs)
1416
+ self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding")
1417
+
1418
+ self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder")
1419
+ self.prompt_encoder = TFSamPromptEncoder(
1420
+ config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder"
1421
+ )
1422
+ self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder")
1423
+ self.config = config
1424
+
1425
+ def get_input_embeddings(self):
1426
+ return self.vision_encoder.get_input_embeddings()
1427
+
1428
+ def get_image_wide_positional_embeddings(self):
1429
+ size = self.config.prompt_encoder_config.image_embedding_size
1430
+ grid = tf.ones((size, size))
1431
+ y_embed = tf.math.cumsum(grid, axis=0) - 0.5
1432
+ x_embed = tf.math.cumsum(grid, axis=1) - 0.5
1433
+ y_embed = y_embed / size
1434
+ x_embed = x_embed / size
1435
+
1436
+ positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1))
1437
+ return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0) # channel x height x width
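For intuition, a tiny sketch of the cumulative-sum trick above: it builds per-pixel (x, y) coordinates at pixel centres, normalized to (0, 1), which are then fed through the shared positional embedding.

    import tensorflow as tf

    size = 4
    grid = tf.ones((size, size))
    y_embed = (tf.math.cumsum(grid, axis=0) - 0.5) / size   # row i -> (i + 0.5) / size
    x_embed = (tf.math.cumsum(grid, axis=1) - 0.5) / size   # column j -> (j + 0.5) / size
    coords = tf.stack([x_embed, y_embed], axis=-1)          # (4, 4, 2) grid of pixel-centre coordinates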
1438
+
1439
+ def get_image_embeddings(
1440
+ self,
1441
+ pixel_values,
1442
+ output_attentions: Optional[bool] = None,
1443
+ output_hidden_states: Optional[bool] = None,
1444
+ return_dict: Optional[bool] = None,
1445
+ ):
1446
+ r"""
1447
+ Returns the image embeddings by passing the pixel values through the vision encoder.
1448
+
1449
+ Args:
1450
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
1451
+ Input pixel values
1452
+ output_attentions (`bool`, *optional*):
1453
+ Whether or not to return the attentions tensors of all attention layers.
1454
+ output_hidden_states (`bool`, *optional*):
1455
+ Whether or not to return the hidden states of all layers.
1456
+ return_dict (`bool`, *optional*):
1457
+ Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple.
1458
+
1459
+ """
1460
+ vision_output = self.vision_encoder(
1461
+ pixel_values,
1462
+ output_attentions=output_attentions,
1463
+ output_hidden_states=output_hidden_states,
1464
+ return_dict=return_dict,
1465
+ )
1466
+ image_embeddings = vision_output[0]
1467
+ return image_embeddings
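A rough sketch of the memory-saving flow the docstring mentions, reusing one set of image embeddings for several prompt sets (checkpoint name, image path and prompt are placeholders):

    from PIL import Image
    from transformers import SamProcessor, TFSamModel

    image = Image.open("example.jpg")                                   # placeholder path
    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")   # illustrative checkpoint
    model = TFSamModel.from_pretrained("facebook/sam-vit-base")

    inputs = processor(image, input_points=[[[450, 600]]], return_tensors="tf")
    embeddings = model.get_image_embeddings(inputs["pixel_values"])

    # The expensive vision encoder runs once; each new prompt reuses the cached embeddings.
    outputs = model(input_points=inputs["input_points"], image_embeddings=embeddings)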
1468
+
1469
+ def get_prompt_embeddings(
1470
+ self,
1471
+ input_points: tf.Tensor | None = None,
1472
+ input_labels: tf.Tensor | None = None,
1473
+ input_boxes: tf.Tensor | None = None,
1474
+ input_masks: tf.Tensor | None = None,
1475
+ ):
1476
+ r"""
1477
+ Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder.
1478
+
1479
+ Args:
1480
+ input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
1481
+ Optional input points for the prompt encoder. The padding of the point is automatically done by the
1482
+ processor. `point_batch_size` refers to the number of masks that we want the model to predict per
1483
+ point. The model will output `point_batch_size` times 3 masks in total.
1484
+ input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
1485
+ Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
1486
+ processor, or can be fed by the user.
1487
+ input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`):
1488
+ Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
1489
+ processor. Users can also pass the input boxes manually.
1490
+ input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
1491
+ Optional input masks for the prompt encoder.
1492
+ """
1493
+ prompt_output = self.prompt_encoder(
1494
+ input_points=input_points,
1495
+ input_labels=input_labels,
1496
+ input_boxes=input_boxes,
1497
+ input_masks=input_masks,
1498
+ )
1499
+ return prompt_output
1500
+
1501
+ @unpack_inputs
1502
+ @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING)
1503
+ def call(
1504
+ self,
1505
+ pixel_values: TFModelInputType | None = None,
1506
+ input_points: tf.Tensor | None = None,
1507
+ input_labels: tf.Tensor | None = None,
1508
+ input_boxes: tf.Tensor | None = None,
1509
+ input_masks: tf.Tensor | None = None,
1510
+ image_embeddings: tf.Tensor | None = None,
1511
+ multimask_output: bool = True,
1512
+ output_attentions: bool | None = None,
1513
+ output_hidden_states: bool | None = None,
1514
+ return_dict: bool | None = None,
1515
+ training: bool = False,
1516
+ **kwargs,
1517
+ ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]:
1518
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1519
+ output_hidden_states = (
1520
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1521
+ )
1522
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1523
+
1524
+ if pixel_values is None and image_embeddings is None:
1525
+ raise ValueError("Either pixel_values or image_embeddings must be provided.")
1526
+
1527
+ if pixel_values is not None and image_embeddings is not None:
1528
+ raise ValueError("Only one of pixel_values and image_embeddings can be provided.")
1529
+
1530
+ if input_points is not None and len(input_points.shape) != 4:
1531
+ raise ValueError(
1532
+ "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.",
1533
+ " got {}.".format(input_points.shape),
1534
+ )
1535
+ if input_boxes is not None and len(input_boxes.shape) != 3:
1536
+ raise ValueError(
1537
+ "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.",
1538
+ " got {}.".format(input_boxes.shape),
1539
+ )
1540
+ if input_points is not None and input_boxes is not None:
1541
+ point_batch_size = shape_list(input_points)[1]
1542
+ box_batch_size = shape_list(input_boxes)[1]
1543
+ if point_batch_size != box_batch_size:
1544
+ raise ValueError(
1545
+ "You should provide as many bounding boxes as input points per box. Got {} and {}.".format(
1546
+ point_batch_size, box_batch_size
1547
+ )
1548
+ )
1549
+ if pixel_values is not None:
1550
+ # Ensures that later checks pass even with an all-None shape from the serving signature
1551
+ pixel_values = tf.ensure_shape(
1552
+ pixel_values,
1553
+ [
1554
+ None,
1555
+ self.config.vision_config.num_channels,
1556
+ self.config.vision_config.image_size,
1557
+ self.config.vision_config.image_size,
1558
+ ],
1559
+ )
1560
+ image_positional_embeddings = self.get_image_wide_positional_embeddings()
1561
+ # repeat with batch size
1562
+ batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0]
1563
+ image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0)
1564
+
1565
+ vision_attentions = None
1566
+ vision_hidden_states = None
1567
+
1568
+ if pixel_values is not None:
1569
+ vision_outputs = self.vision_encoder(
1570
+ pixel_values,
1571
+ output_attentions=output_attentions,
1572
+ output_hidden_states=output_hidden_states,
1573
+ return_dict=True,
1574
+ training=training,
1575
+ )
1576
+ image_embeddings = vision_outputs["last_hidden_state"]
1577
+
1578
+ if output_hidden_states:
1579
+ vision_hidden_states = vision_outputs["hidden_states"]
1580
+ if output_attentions:
1581
+ vision_attentions = vision_outputs["attentions"]
1582
+
1583
+ if input_points is not None and input_labels is None:
1584
+ input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32)
1585
+
1586
+ if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:
1587
+ raise ValueError(
1588
+ "The batch size of the image embeddings and the input points must be the same. ",
1589
+ "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]),
1590
+ " if you want to pass multiple points for the same image, make sure that you passed ",
1591
+ " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ",
1592
+ " input_labels of shape (batch_size, point_batch_size, num_points_per_image)",
1593
+ )
1594
+
1595
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
1596
+ batch_size=shape_list(image_embeddings)[0],
1597
+ input_points=input_points,
1598
+ input_labels=input_labels,
1599
+ input_boxes=input_boxes,
1600
+ input_masks=input_masks,
1601
+ )
1602
+
1603
+ low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder(
1604
+ image_embeddings=image_embeddings,
1605
+ image_positional_embeddings=image_positional_embeddings,
1606
+ sparse_prompt_embeddings=sparse_embeddings,
1607
+ dense_prompt_embeddings=dense_embeddings,
1608
+ multimask_output=multimask_output,
1609
+ output_attentions=output_attentions,
1610
+ )
1611
+
1612
+ if not return_dict:
1613
+ output = (iou_predictions, low_res_masks)
1614
+ if output_hidden_states:
1615
+ output = output + (vision_hidden_states,)
1616
+
1617
+ if output_attentions:
1618
+ output = output + (vision_attentions, mask_decoder_attentions)
1619
+ return output
1620
+
1621
+ return TFSamImageSegmentationOutput(
1622
+ iou_scores=iou_predictions,
1623
+ pred_masks=low_res_masks,
1624
+ vision_hidden_states=vision_hidden_states,
1625
+ vision_attentions=vision_attentions,
1626
+ mask_decoder_attentions=mask_decoder_attentions,
1627
+ )
1628
+
1629
+ def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput:
1630
+ hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None
1631
+ attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None
1632
+
1633
+ return TFSamImageSegmentationOutput(
1634
+ iou_scores=output.iou_scores,
1635
+ pred_masks=output.pred_masks,
1636
+ vision_hidden_states=hs if self.config.output_hidden_states else None,
1637
+ vision_attentions=attns if self.config.output_attentions else None,
1638
+ mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else None,
1639
+ )
1640
+
1641
+ def build(self, input_shape=None):
1642
+ if self.built:
1643
+ return
1644
+ self.built = True
1645
+ if getattr(self, "shared_image_embedding", None) is not None:
1646
+ with tf.name_scope(self.shared_image_embedding.name):
1647
+ self.shared_image_embedding.build(None)
1648
+ if getattr(self, "vision_encoder", None) is not None:
1649
+ with tf.name_scope(self.vision_encoder.name):
1650
+ self.vision_encoder.build(None)
1651
+ if getattr(self, "prompt_encoder", None) is not None:
1652
+ with tf.name_scope(self.prompt_encoder.name):
1653
+ self.prompt_encoder.build(None)
1654
+ if getattr(self, "mask_decoder", None) is not None:
1655
+ with tf.name_scope(self.mask_decoder.name):
1656
+ self.mask_decoder.build(None)