applied-ai-018 committed
Commit 826f2ea · verified · 1 Parent(s): c6434cd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff for the full set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__init__.py +69 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py +66 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py +241 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py +49 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py +219 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py +280 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py +1386 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py +519 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__init__.py +45 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py +644 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py +158 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/configuration_idefics.py +327 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/image_processing_idefics.py +168 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py +1588 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py +408 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/vision.py +490 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__init__.py +69 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/convert_instructblip_original_to_pytorch.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/configuration_instructblip.py +358 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py +303 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py +1567 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py +173 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py +117 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py +170 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py +60 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py +1434 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py +1656 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py +503 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py +169 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__init__.py ADDED
@@ -0,0 +1,69 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
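The `__init__.py` above registers CANINE with the library's `_LazyModule` machinery: the configuration and tokenizer are always importable, the PyTorch modeling classes are only exposed when `is_torch_available()` is true, and nothing is actually loaded until a name is first accessed. A minimal sketch of what this enables (assuming `torch` is installed in the environment):

```python
# Minimal sketch: these top-level imports are resolved lazily by _LazyModule,
# so the canine submodules are only loaded on first access.
from transformers import CanineConfig, CanineModel, CanineTokenizer

config = CanineConfig()        # default CANINE configuration
model = CanineModel(config)    # randomly initialized weights
tokenizer = CanineTokenizer()  # character-level tokenizer, no vocab file required

print(config.model_type)       # "canine"
```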
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,66 @@
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert CANINE checkpoint."""


import argparse

from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
    # Initialize PyTorch model
    config = CanineConfig()
    model = CanineModel(config)
    model.eval()

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_canine(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    tokenizer = CanineTokenizer()
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint. Should end with model.ckpt",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to a folder where the PyTorch model will be placed.",
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
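The script above is meant to be run from the command line with `--tf_checkpoint_path` and `--pytorch_dump_path`, but the conversion itself is the single function `convert_tf_checkpoint_to_pytorch`. A hedged sketch of calling it directly from Python; the two paths are placeholders rather than artifacts of this commit, and TensorFlow must be installed for the weight loading step:

```python
# Sketch only: both paths below are hypothetical placeholders.
from transformers.models.canine.convert_canine_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

tf_checkpoint_path = "/checkpoints/canine-s/model.ckpt"  # TF checkpoint prefix (hypothetical)
pytorch_dump_path = "/checkpoints/canine-s-pytorch"      # output folder (hypothetical)

# Builds CanineModel(CanineConfig()), loads the TF weights into it, then writes
# the model and tokenizer files via save_pretrained(), exactly as the script does.
convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path)
```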
llmeval-env/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py ADDED
@@ -0,0 +1,241 @@
# coding=utf-8
# Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CANINE."""

from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
    converts each character into its Unicode code point.

    [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`].

    Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters.

    Args:
        model_max_length (`int`, *optional*, defaults to 2048):
            The maximum sentence length the model accepts.
    """

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def get_vocab(self):
        vocab = {chr(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to
        human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A CANINE sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
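Since `_convert_token_to_id` is just `ord()` and the special tokens live in Unicode's Private Use Area, the encodings this tokenizer produces can be read off directly from the code above. A short sketch of the expected behaviour (the values follow from `CLS = 0xE000`, `SEP = 0xE001` and the `[CLS] X [SEP]` layout shown in `build_inputs_with_special_tokens`):

```python
from transformers import CanineTokenizer

tokenizer = CanineTokenizer()  # no vocab file: every id is a Unicode code point

enc = tokenizer("hi")
print(enc["input_ids"])
# Expected: [57344, 104, 105, 57345]
#   57344 == 0xE000 -> [CLS], 104 == ord("h"), 105 == ord("i"), 57345 == 0xE001 -> [SEP]

# A sequence pair follows the [CLS] A [SEP] B [SEP] layout, with token_type_ids
# 0 over the first segment (including its [SEP]) and 1 over the second.
pair = tokenizer("hi", "ho")
print(pair["input_ids"])       # [57344, 104, 105, 57345, 104, 111, 57345]
print(pair["token_type_ids"])  # [0, 0, 0, 0, 1, 1, 1]
```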
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py ADDED
@@ -0,0 +1,49 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig"],
    "tokenization_fsmt": ["FSMTTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]


if TYPE_CHECKING:
    from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
    from .tokenization_fsmt import FSMTTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (938 Bytes)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc ADDED
Binary file (8.56 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.63 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc ADDED
Binary file (38 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc ADDED
Binary file (17 kB)
 
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py ADDED
@@ -0,0 +1,219 @@
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" FSMT configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class DecoderConfig(PretrainedConfig):
    r"""
    Configuration class for FSMT's decoder specific things. note: this is a private helper class
    """

    model_type = "fsmt_decoder"

    def __init__(self, vocab_size=0, bos_token_id=0):
        super().__init__()
        self.vocab_size = vocab_size
        self.bos_token_id = bos_token_id


class FSMTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the FSMT
    [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        langs (`List[str]`):
            A list with source language and target_language (e.g., ['en', 'ru']).
        src_vocab_size (`int`):
            Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the encoder.
        tgt_vocab_size (`int`):
            Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed to the forward method in the decoder.
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Scale embeddings by dividing by sqrt(d_model).
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        decoder_start_token_id (`int`, *optional*):
            This model starts decoding with `eos_token_id`
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            Google "layerdrop arxiv", as it's not explainable in one line.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            Google "layerdrop arxiv", as it's not explainable in one line.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether this is an encoder/decoder model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        num_beams (`int`, *optional*, defaults to 5):
            Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
            no beam search.
        length_penalty (`float`, *optional*, defaults to 1):
            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
            `length_penalty` < 0.0 encourages shorter sequences.
        early_stopping (`bool`, *optional*, defaults to `False`):
            Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
            when at least `num_beams` sentences are finished per batch or not.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Examples:

    ```python
    >>> from transformers import FSMTConfig, FSMTModel

    >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration
    >>> config = FSMTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = FSMTModel(config)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "fsmt"
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    # update the defaults from config file
    def __init__(
        self,
        langs=["en", "de"],
        src_vocab_size=42024,
        tgt_vocab_size=42024,
        activation_function="relu",
        d_model=1024,
        max_length=200,
        max_position_embeddings=1024,
        encoder_ffn_dim=4096,
        encoder_layers=12,
        encoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_ffn_dim=4096,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_layerdrop=0.0,
        attention_dropout=0.0,
        dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        is_encoder_decoder=True,
        scale_embedding=True,
        tie_word_embeddings=False,
        num_beams=5,
        length_penalty=1.0,
        early_stopping=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **common_kwargs,
    ):
        self.langs = langs
        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.d_model = d_model  # encoder_embed_dim and decoder_embed_dim

        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = self.num_hidden_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id)
        if "decoder" in common_kwargs:
            del common_kwargs["decoder"]

        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            is_encoder_decoder=is_encoder_decoder,
            tie_word_embeddings=tie_word_embeddings,
            forced_eos_token_id=forced_eos_token_id,
            max_length=max_length,
            num_beams=num_beams,
            length_penalty=length_penalty,
            early_stopping=early_stopping,
            **common_kwargs,
        )
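One thing worth noting in the constructor above is that FSMT keeps two vocabularies and wraps the target side in the private `DecoderConfig`, whose `bos_token_id` is set to `eos_token_id` (FSMT starts decoding from EOS). A small sketch; the vocabulary sizes below are illustrative values, not those of a released checkpoint:

```python
from transformers import FSMTConfig

# Separate source/target vocab sizes, as used by the asymmetric en-ru/ru-en models.
config = FSMTConfig(langs=["en", "ru"], src_vocab_size=31000, tgt_vocab_size=31500)

print(config.model_type)            # "fsmt"
print(config.decoder.vocab_size)    # 31500 -- mirrors tgt_vocab_size
print(config.decoder.bos_token_id)  # 2 -- equals eos_token_id: decoding starts with EOS
print(config.num_hidden_layers)     # 12 -- set alongside encoder_layers
```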
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,280 @@
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
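A quick way to see what `rewrite_dict_keys` does to a fairseq BPE vocabulary is to run it on the tiny dictionary from its own comment; note that importing this module pulls in `fairseq` and `torch`, so both need to be installed. The expected result keeps the four special tokens intact, strips the trailing `@@` continuation marker, and appends `</w>` to word-final pieces:

```python
# Assumes fairseq and torch are installed (the conversion module imports them at the top).
from transformers.models.fsmt.convert_fsmt_original_pytorch_checkpoint_to_pytorch import rewrite_dict_keys

d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
print(rewrite_dict_keys(d))
# Expected: {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
```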
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py ADDED
@@ -0,0 +1,1386 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ # Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19
17
+ # Authors:
18
+ # - @alexeib Alexei Baevski
19
+ # - @edunov Sergey Edunov
20
+ # - @michaelauli Michael Auli
21
+ # - @myleott Myle Ott
22
+ # - @nng555 Nathan Ng
23
+ # - David Grangier
24
+ # - Kyra Yee
25
+ #
26
+ # Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616
27
+ #
28
+ """PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19"""
29
+
30
+ import math
31
+ from typing import Any, Dict, List, Optional, Tuple, Union
32
+
33
+ import torch
34
+ from torch import Tensor, nn
35
+ from torch.nn import CrossEntropyLoss, LayerNorm
36
+
37
+ from ...activations import ACT2FN
38
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
39
+ from ...modeling_outputs import (
40
+ BaseModelOutput,
41
+ BaseModelOutputWithPastAndCrossAttentions,
42
+ Seq2SeqLMOutput,
43
+ Seq2SeqModelOutput,
44
+ )
45
+ from ...modeling_utils import PreTrainedModel
46
+ from ...utils import (
47
+ add_code_sample_docstrings,
48
+ add_end_docstrings,
49
+ add_start_docstrings,
50
+ add_start_docstrings_to_model_forward,
51
+ logging,
52
+ replace_return_docstrings,
53
+ )
54
+ from .configuration_fsmt import FSMTConfig
55
+
56
+
57
+ logger = logging.get_logger(__name__)
58
+
59
+ _CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en"
60
+ _CONFIG_FOR_DOC = "FSMTConfig"
61
+
62
+ # See all FSMT models at https://huggingface.co/models?filter=fsmt
63
+
64
+ # Porting notes:
65
+ # this one is modeled after BartModel*
66
+ #
67
+ # Currently only translation (fairseq also has weights for LM)
68
+ #
69
+ # fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported.
70
+ # - ru-en, en-ru use asymmetric vocab
71
+ # - de-en, en-de use a merged single vocab (but the code works as if they are separate)
72
+ #
73
+ # Differences with Bart:
74
+ # - not using bos token
75
+ # - 2 separate vocabs (src and target)
76
+ # - embed weights aren't tied
77
+ # - uses a model Ensemble (but that part isn't ported/implemented yet) - so we
78
+ # aren't getting as good of a BLEU score
79
+ # - uses a projection layer at the end of the decoder
80
+ # - doesn't use final_logits_bias
81
+ # - beam search: stops as soon as num_beams == len(hypos) (whereas transformers
82
+ # is not satisfied there and will continue searching until the next cycles
83
+ # aren't promising something better), comparing BLEU scores - the transformers
84
+ # algorithm is slightly superior, therefore using the latter. But if you want
85
+ # to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``.
86
+ #
87
+ # SinusoidalPositionalEmbedding is slightly different from Bart's - generates
88
+ # different embeddings. This implementation is copied verbatim from fairseq with
89
+ # some small changes to make it work here.
90
+ #
91
+ # Other changes:
92
+ # - doesn't support use_cache as Bart's version does
93
+ #
94
+ #
95
+ # FSMTConfig changes with BartConfig
96
+ #
97
+ # Differences with BART:
98
+ # - src/tgt vocabs aren't shared
99
+ # - token embeddings aren't shared
100
+ # - needs a language pair
101
+ # - scale_embedding are True
102
+ #
103
+ # some unused args were removed too
104
+ #
105
+ #
106
+ # TODO:
107
+ # - port model ensemble (fs uses 4 model checkpoints)
108
+ # - solve beam search discrepancies
109
+ # docstyle-ignore
110
+
111
+ """
112
+
113
+ Here is how to compare BLEU scores against fairseq implementation:
114
+
115
+ # en-ru
116
+
117
+ export PAIR=en-ru
118
+ export DATA_DIR=data/$PAIR
119
+ export SAVE_DIR=data/$PAIR
120
+ export BS=8
121
+ export NUM_BEAMS=50
122
+ mkdir -p $DATA_DIR
123
+ sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
124
+ sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
125
+ echo $PAIR
126
+ PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
127
+
128
+ # (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605)
129
+
130
+
131
+ # ru-en
132
+
133
+ export PAIR=ru-en
134
+ export DATA_DIR=data/$PAIR
135
+ export SAVE_DIR=data/$PAIR
136
+ export BS=8
137
+ export NUM_BEAMS=50
138
+ mkdir -p $DATA_DIR
139
+ sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
140
+ sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
141
+ PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
142
+
143
+
144
+ # (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937)
145
+
146
+
147
+ # de-en
148
+
149
+ export PAIR=de-en
150
+ export DATA_DIR=data/$PAIR
151
+ export SAVE_DIR=data/$PAIR
152
+ export BS=8
153
+ export NUM_BEAMS=50
154
+ mkdir -p $DATA_DIR
155
+ sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
156
+ sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
157
+ echo $PAIR
158
+ PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
159
+
160
+ # (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750)
161
+
162
+
163
+
164
+ # en-de
165
+
166
+ export PAIR=en-de
167
+ export DATA_DIR=data/$PAIR
168
+ export SAVE_DIR=data/$PAIR
169
+ export BS=8
170
+ mkdir -p $DATA_DIR
171
+ sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
172
+ sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
173
+ echo $PAIR
174
+ PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
175
+
176
+ # (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862)
177
+
178
+ """
179
+
180
+
181
+ FSMT_START_DOCSTRING = r"""
182
+
183
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
184
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
185
+ etc.)
186
+
187
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
188
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
189
+ and behavior.
190
+
191
+ Parameters:
192
+ config ([`FSMTConfig`]): Model configuration class with all the parameters of the model.
193
+ Initializing with a config file does not load the weights associated with the model, only the
194
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
195
+
196
+ """
197
+ FSMT_GENERATION_EXAMPLE = r"""
198
+ Translation example::
199
+
200
+ ```python
201
+ >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration
202
+
203
+ >>> mname = "facebook/wmt19-ru-en"
204
+ >>> model = FSMTForConditionalGeneration.from_pretrained(mname)
205
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
206
+
207
+ >>> src_text = "Машинное обучение - это здорово, не так ли?"
208
+ >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids
209
+ >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)
210
+ >>> tokenizer.decode(outputs[0], skip_special_tokens=True)
211
+ "Machine learning is great, isn't it?"
212
+ ```
213
+
214
+ """
215
+
216
+ FSMT_INPUTS_DOCSTRING = r"""
217
+ Args:
218
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
219
+ Indices of input sequence tokens in the vocabulary.
220
+
221
+ Indices can be obtained using [`FSMTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
222
+ [`PreTrainedTokenizer.__call__`] for details.
223
+
224
+ [What are input IDs?](../glossary#input-ids)
225
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
226
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
227
+
228
+ - 1 for tokens that are **not masked**,
229
+ - 0 for tokens that are **masked**.
230
+
231
+ [What are attention masks?](../glossary#attention-mask)
232
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
233
+ Indices of decoder input sequence tokens in the vocabulary.
234
+
235
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
236
+ [`PreTrainedTokenizer.__call__`] for details.
237
+
238
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
239
+
240
+ FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
241
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
242
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
243
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
244
+ be used by default.
245
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
246
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
247
+
248
+ - 1 indicates the head is **not masked**,
249
+ - 0 indicates the head is **masked**.
250
+
251
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
252
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
253
+
254
+ - 1 indicates the head is **not masked**,
255
+ - 0 indicates the head is **masked**.
256
+
257
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
258
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
259
+ 1]`:
260
+
261
+ - 1 indicates the head is **not masked**,
262
+ - 0 indicates the head is **masked**.
263
+
264
+ encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*):
265
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
266
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at
267
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
268
+ past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
269
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
270
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
271
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
272
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
273
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
274
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
275
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
276
+ model's internal embedding lookup matrix.
277
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
278
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
279
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
280
+ input (see `past_key_values`). This is useful if you want more control over how to convert
281
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
282
+
283
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
284
+ of `inputs_embeds`.
285
+ use_cache (`bool`, *optional*, defaults to `True`):
286
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
287
+ `past_key_values`).
288
+ output_attentions (`bool`, *optional*):
289
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
290
+ tensors for more detail.
291
+ output_hidden_states (`bool`, *optional*):
292
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
293
+ more detail.
294
+ return_dict (`bool`, *optional*):
295
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
296
+ """
297
+
298
+
299
+ def invert_mask(attention_mask):
300
+ """Turns 1->0, 0->1, False->True, True-> False"""
301
+ assert attention_mask.dim() == 2
302
+ return attention_mask.eq(0)
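+ # Illustrative sketch (arbitrary example values): an attention mask of [[1, 1, 0]]
+ # (1 = keep, 0 = pad) becomes [[False, False, True]], i.e. True marks the positions
+ # that should be ignored.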
303
+
304
+
305
+ def triu_onnx(x, diagonal=0):
306
+ l = x.shape[0]
307
+ arange = torch.arange(l, device=x.device)
308
+ mask = arange.expand(l, l)
309
+ arange = arange.unsqueeze(-1)
310
+ if diagonal:
311
+ arange = arange + diagonal
312
+ mask = mask >= arange
313
+ return x.masked_fill(mask == 0, 0)
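+ # Illustrative sketch (arbitrary example values): for square 2D inputs this behaves like
+ # torch.triu, presumably reimplemented with ONNX-friendly primitives (arange/expand/masked_fill):
+ #
+ #     >>> import torch
+ #     >>> triu_onnx(torch.tensor([[1, 2], [3, 4]]), diagonal=1)
+ #     tensor([[0, 2],
+ #             [0, 0]])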
314
+
315
+
316
+ def _prepare_fsmt_decoder_inputs(
317
+ config,
318
+ input_ids,
319
+ decoder_input_ids=None,
320
+ decoder_padding_mask=None,
321
+ causal_mask_dtype=torch.float32,
322
+ ):
323
+ """
324
+ Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.
325
+ This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during
326
+ generation
327
+ """
328
+ pad_token_id = config.pad_token_id
329
+ if decoder_input_ids is None:
330
+ decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
331
+ bsz, tgt_len = decoder_input_ids.size()
332
+ if decoder_padding_mask is None:
333
+ decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
334
+ else:
335
+ decoder_padding_mask = invert_mask(decoder_padding_mask)
336
+ causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to(
337
+ device=decoder_input_ids.device
338
+ )
339
+ return decoder_input_ids, decoder_padding_mask, causal_mask
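+ # Illustrative sketch (arbitrary size): for tgt_len = 3 the returned causal_mask looks like
+ #
+ #     [[0., -inf, -inf],
+ #      [0.,   0., -inf],
+ #      [0.,   0.,   0.]]
+ #
+ # where "-inf" is actually torch.finfo(causal_mask_dtype).min (see fill_with_neg_inf below),
+ # so each target position can only attend to itself and earlier positions.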
340
+
341
+
342
+ class PretrainedFSMTModel(PreTrainedModel):
343
+ config_class = FSMTConfig
344
+ base_model_prefix = "model"
345
+
346
+ def _init_weights(self, module):
347
+ std = self.config.init_std
348
+ if isinstance(module, nn.Linear):
349
+ module.weight.data.normal_(mean=0.0, std=std)
350
+ if module.bias is not None:
351
+ module.bias.data.zero_()
352
+ elif isinstance(module, SinusoidalPositionalEmbedding):
353
+ pass
354
+ elif isinstance(module, nn.Embedding):
355
+ module.weight.data.normal_(mean=0.0, std=std)
356
+ if module.padding_idx is not None:
357
+ module.weight.data[module.padding_idx].zero_()
358
+
359
+ @property
360
+ def dummy_inputs(self):
361
+ pad_token = self.config.pad_token_id
362
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
363
+ dummy_inputs = {
364
+ "attention_mask": input_ids.ne(pad_token),
365
+ "input_ids": input_ids,
366
+ }
367
+ return dummy_inputs
368
+
369
+
370
+ def _make_linear_from_emb(emb):
371
+ vocab_size, emb_size = emb.weight.shape
372
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
373
+ lin_layer.weight.data = emb.weight.data
374
+ return lin_layer
375
+
376
+
377
+ # Helper Functions, mostly for making masks
378
+ def _check_shapes(shape_1, shape2):
379
+ if shape_1 != shape2:
380
+ raise AssertionError(f"shape mismatch: {shape_1} != {shape2}")
381
+
382
+
383
+ def shift_tokens_right(input_ids, pad_token_id):
384
+ """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
385
+
386
+ # replace possible -100 values in labels by `pad_token_id`
387
+ input_ids.masked_fill_(input_ids == -100, pad_token_id)
388
+
389
+ prev_output_tokens = input_ids.clone()
390
+ index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
391
+ prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
392
+ prev_output_tokens[:, 1:] = input_ids[:, :-1]
393
+ return prev_output_tokens
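+ # Illustrative sketch (assuming pad_token_id == 1 and <eos> == 2; values are arbitrary):
+ #
+ #     >>> import torch
+ #     >>> shift_tokens_right(torch.tensor([[5, 6, 7, 2, 1]]), pad_token_id=1)
+ #     tensor([[2, 5, 6, 7, 2]])
+ #
+ # i.e. the final <eos> wraps around to position 0 and everything else moves one step to
+ # the right, which is the usual teacher-forcing decoder input.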
394
+
395
+
396
+ def make_padding_mask(input_ids, padding_idx=1):
397
+ """True for pad tokens"""
398
+ padding_mask = input_ids.eq(padding_idx)
399
+ if not padding_mask.any():
400
+ padding_mask = None
401
+ return padding_mask
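+ # Illustrative sketch (assuming padding_idx == 1; values are arbitrary):
+ # [[5, 6, 1], [7, 1, 1]] -> [[False, False, True], [False, True, True]], and None is
+ # returned when the batch contains no padding at all.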
402
+
403
+
404
+ # Helper Modules
405
+
406
+
407
+ class EncoderLayer(nn.Module):
408
+ def __init__(self, config: FSMTConfig):
409
+ super().__init__()
410
+ self.embed_dim = config.d_model
411
+ self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
412
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
413
+ self.dropout = config.dropout
414
+ self.activation_fn = ACT2FN[config.activation_function]
415
+ self.activation_dropout = config.activation_dropout
416
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
417
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
418
+ self.final_layer_norm = LayerNorm(self.embed_dim)
419
+
420
+ def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
421
+ """
422
+ Args:
423
+ x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
424
+ encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape
425
+ *(batch, src_len)* where padding elements are indicated by `1`. A value of `1` means the
+ corresponding source position is excluded from (masked out of) attention, while `0` means
+ it is included in attention.
428
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
429
+ *(config.encoder_attention_heads,)*.
430
+
431
+ Returns:
432
+ encoded output of shape *(seq_len, batch, embed_dim)*
433
+ """
434
+ residual = x
435
+ x, attn_weights = self.self_attn(
436
+ query=x,
437
+ key=x,
438
+ key_padding_mask=encoder_padding_mask,
439
+ layer_head_mask=layer_head_mask,
440
+ output_attentions=output_attentions,
441
+ )
442
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
443
+ x = residual + x
444
+ x = self.self_attn_layer_norm(x)
445
+
446
+ residual = x
447
+ x = self.activation_fn(self.fc1(x))
448
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
449
+ x = self.fc2(x)
450
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
451
+ x = residual + x
452
+ x = self.final_layer_norm(x)
453
+ return x, attn_weights
454
+
455
+
456
+ class FSMTEncoder(nn.Module):
457
+ """
458
+ Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is an [`EncoderLayer`].
459
+
460
+ Args:
461
+ config: FSMTConfig
462
+ """
463
+
464
+ def __init__(self, config: FSMTConfig, embed_tokens):
465
+ super().__init__()
466
+ self.dropout = config.dropout
467
+ self.layerdrop = config.encoder_layerdrop
468
+ self.padding_idx = embed_tokens.padding_idx
469
+ self.embed_tokens = embed_tokens
470
+ embed_dim = embed_tokens.embedding_dim
471
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
472
+ self.embed_positions = SinusoidalPositionalEmbedding(
473
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
474
+ )
475
+ self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer]
476
+
477
+ def forward(
478
+ self,
479
+ input_ids: torch.Tensor,
480
+ attention_mask: Optional[torch.Tensor] = None,
481
+ inputs_embeds: torch.Tensor = None,
482
+ head_mask: Optional[torch.Tensor] = None,
483
+ output_attentions: bool = False,
484
+ output_hidden_states: bool = False,
485
+ return_dict: bool = True,
486
+ ):
487
+ """
488
+ Args:
489
+ input_ids (`torch.LongTensor`): tokens in the source language of shape
490
+ *(batch, src_len)*
491
+ attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
492
+ inputs_embeds (`torch.FloatTensor`):
493
+ embedding vectors of shape *(batch, src_len, embed_dim)*
494
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
495
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
496
+
497
+ - 1 indicates the head is **not masked**,
498
+ - 0 indicates the head is **masked**.
499
+
500
+ Returns:
501
+ BaseModelOutput or Tuple comprised of:
502
+
503
+ - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
504
+ - **encoder_states** (`Tuple(torch.FloatTensor)`): all intermediate hidden states of shape *(src_len,
+ batch, embed_dim)*. Only populated if *output_hidden_states* is True.
+ - **all_attentions** (`Tuple(torch.FloatTensor)`): Attention weights for each layer.
+ During training this might not be of length n_layers because of layer dropout.
508
+ """
509
+ # check attention mask and invert
510
+ if attention_mask is not None:
511
+ attention_mask = invert_mask(attention_mask)
512
+
513
+ if input_ids is not None and inputs_embeds is not None:
514
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
515
+ elif input_ids is not None:
516
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
517
+ embed_pos = self.embed_positions(input_ids)
518
+ elif inputs_embeds is not None:
519
+ inputs_embeds = inputs_embeds * self.embed_scale
520
+
521
+ # We assume zeros hidden states correspond to padding tokens
522
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
523
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
524
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
525
+ )
526
+
527
+ embed_pos = self.embed_positions(position_ids)
528
+ else:
529
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
530
+
531
+ x = inputs_embeds + embed_pos
532
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
533
+
534
+ # B x T x C -> T x B x C
535
+ x = x.transpose(0, 1)
536
+
537
+ encoder_states = () if output_hidden_states else None
538
+ all_attentions = () if output_attentions else None
539
+ # check if head_mask has a correct number of layers specified if desired
540
+ if head_mask is not None:
541
+ assert head_mask.size()[0] == (
542
+ len(self.layers)
543
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
544
+ for idx, encoder_layer in enumerate(self.layers):
545
+ if output_hidden_states:
546
+ x = x.transpose(0, 1) # T x B x C -> B x T x C
547
+ encoder_states += (x,)
548
+ x = x.transpose(0, 1) # B x T x C -> T x B x C
549
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
550
+ dropout_probability = torch.rand([])
551
+ if self.training and (dropout_probability < self.layerdrop): # skip the layer
552
+ attn = None
553
+ else:
554
+ x, attn = encoder_layer(
555
+ x,
556
+ attention_mask,
557
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
558
+ output_attentions=output_attentions,
559
+ )
560
+
561
+ if output_attentions:
562
+ all_attentions = all_attentions + (attn,)
563
+
564
+ # T x B x C -> B x T x C
565
+ x = x.transpose(0, 1)
566
+
567
+ if output_hidden_states:
568
+ encoder_states += (x,)
569
+
570
+ if not return_dict:
571
+ return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
572
+ return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
573
+
574
+
575
+ class DecoderLayer(nn.Module):
576
+ def __init__(self, config: FSMTConfig):
577
+ super().__init__()
578
+ self.embed_dim = config.d_model
579
+
580
+ self.self_attn = Attention(
581
+ embed_dim=self.embed_dim,
582
+ num_heads=config.decoder_attention_heads,
583
+ dropout=config.attention_dropout,
584
+ )
585
+ self.dropout = config.dropout
586
+ self.activation_fn = ACT2FN[config.activation_function]
587
+ self.activation_dropout = config.activation_dropout
588
+
589
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
590
+ self.encoder_attn = Attention(
591
+ self.embed_dim,
592
+ config.decoder_attention_heads,
593
+ dropout=config.attention_dropout,
594
+ encoder_decoder_attention=True,
595
+ )
596
+ self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
597
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
598
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
599
+ self.final_layer_norm = LayerNorm(self.embed_dim)
600
+
601
+ def forward(
602
+ self,
603
+ x,
604
+ encoder_hidden_states,
605
+ encoder_attn_mask=None,
606
+ layer_state=None,
607
+ causal_mask=None,
608
+ layer_head_mask=None,
609
+ cross_attn_layer_head_mask=None,
610
+ decoder_padding_mask=None,
611
+ output_attentions=False,
612
+ ):
613
+ residual = x
614
+
615
+ if layer_state is None:
616
+ layer_state = {}
617
+
618
+ # Self Attention
619
+ x, self_attn_weights = self.self_attn(
620
+ query=x,
621
+ key=x,
622
+ layer_state=layer_state, # adds keys to layer state
623
+ key_padding_mask=decoder_padding_mask,
624
+ attn_mask=causal_mask,
625
+ layer_head_mask=layer_head_mask,
626
+ output_attentions=output_attentions,
627
+ )
628
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
629
+ x = residual + x
630
+ x = self.self_attn_layer_norm(x)
631
+
632
+ # Cross attention
633
+ residual = x
634
+ assert self.encoder_attn.cache_key != self.self_attn.cache_key
635
+ x, cross_attn_weights = self.encoder_attn(
636
+ query=x,
637
+ key=encoder_hidden_states,
638
+ key_padding_mask=encoder_attn_mask,
639
+ layer_state=layer_state, # mutates layer state
640
+ layer_head_mask=cross_attn_layer_head_mask,
641
+ output_attentions=output_attentions,
642
+ )
643
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
644
+ x = residual + x
645
+ x = self.encoder_attn_layer_norm(x)
646
+
647
+ # Fully Connected
648
+ residual = x
649
+ x = self.activation_fn(self.fc1(x))
650
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
651
+ x = self.fc2(x)
652
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
653
+ x = residual + x
654
+ x = self.final_layer_norm(x)
655
+ return (
656
+ x,
657
+ self_attn_weights,
658
+ layer_state,
659
+ cross_attn_weights,
660
+ ) # layer_state = cache for decoding
661
+
662
+
663
+ class FSMTDecoder(nn.Module):
664
+ """
665
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
666
+
667
+ Args:
668
+ config: FSMTConfig
669
+ embed_tokens (nn.Embedding): output embedding
670
+ """
671
+
672
+ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
673
+ super().__init__()
674
+ self.dropout = config.dropout
675
+ self.layerdrop = config.decoder_layerdrop
676
+ self.padding_idx = embed_tokens.padding_idx
677
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
678
+ self.embed_tokens = embed_tokens
679
+ embed_dim = embed_tokens.embedding_dim
680
+ self.embed_positions = SinusoidalPositionalEmbedding(
681
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
682
+ )
683
+ self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer]
684
+
685
+ if is_deepspeed_zero3_enabled():
686
+ import deepspeed
687
+
688
+ with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None):
689
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
690
+ else:
691
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
692
+ self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False)
693
+ self.output_projection.weight = self.embed_tokens.weight
694
+
695
+ def forward(
696
+ self,
697
+ input_ids: torch.Tensor,
698
+ encoder_hidden_states: torch.Tensor,
699
+ encoder_padding_mask: torch.Tensor,
700
+ decoder_padding_mask: torch.Tensor,
701
+ decoder_causal_mask: torch.Tensor,
702
+ head_mask: Optional[torch.Tensor] = None,
703
+ inputs_embeds: Optional[torch.Tensor] = None,
704
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
705
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
706
+ use_cache: bool = False,
707
+ output_attentions: bool = False,
708
+ output_hidden_states: bool = False,
709
+ return_dict: bool = True,
710
+ ):
711
+ """
712
+ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
713
+ EMNLP 2019).
714
+
715
+ Args:
716
+ input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
717
+ previous decoder outputs for teacher forcing
718
+ encoder_hidden_states: output from the encoder, used for
719
+ encoder-side attention
720
+ encoder_padding_mask: for ignoring pad tokens
721
+ past_key_values (dict or None): dictionary used for storing state during generation
722
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
723
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
724
+
725
+ - 1 indicates the head is **not masked**,
726
+ - 0 indicates the head is **masked**.
727
+
728
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
729
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
730
+
731
+ - 1 indicates the head is **not masked**,
732
+ - 0 indicates the head is **masked**.
733
+
734
+ Returns:
735
+ BaseModelOutputWithPast or tuple:
736
+
737
+ - the decoder's features of shape *(batch, tgt_len, embed_dim)*
738
+ - the cache
739
+ - hidden states
740
+ - attentions
741
+ """
742
+ # check attention mask and invert
743
+ if encoder_padding_mask is not None:
744
+ encoder_padding_mask = invert_mask(encoder_padding_mask)
745
+
746
+ if input_ids is not None and inputs_embeds is not None:
747
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
748
+ elif input_ids is not None:
749
+ # embed positions
750
+ positions = self.embed_positions(input_ids)
751
+ if use_cache:
752
+ input_ids = input_ids[:, -1:]
753
+ positions = positions[:, -1:] # happens after we embed them
754
+ x = self.embed_tokens(input_ids) * self.embed_scale
755
+ elif inputs_embeds is not None:
756
+ # We assume zeros hidden states correspond to padding tokens
757
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
758
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
759
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
760
+ )
761
+ positions = self.embed_positions(position_ids)
762
+ x = inputs_embeds * self.embed_scale
763
+ else:
764
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
765
+
766
+ x += positions
767
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
768
+
769
+ # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
770
+ x = x.transpose(0, 1)
771
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
772
+
773
+ # decoder layers
774
+ all_hidden_states = () if output_hidden_states else None
775
+ all_self_attns = () if output_attentions else None
776
+ all_cross_attns = () if output_attentions else None
777
+ next_decoder_cache = []
778
+
779
+ # check if head_mask has a correct number of layers specified if desired
780
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
781
+ if attn_mask is not None:
782
+ assert attn_mask.size()[0] == (len(self.layers)), (
783
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
784
+ f" {head_mask.size()[0]}."
785
+ )
786
+ for idx, decoder_layer in enumerate(self.layers):
787
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
788
+ if output_hidden_states:
789
+ x = x.transpose(0, 1)
790
+ all_hidden_states += (x,)
791
+ x = x.transpose(0, 1)
792
+ if self.training:
793
+ dropout_probability = torch.rand([])
794
+ if dropout_probability < self.layerdrop:
795
+ continue
796
+
797
+ layer_state = past_key_values[idx] if past_key_values is not None else None
798
+
799
+ x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
800
+ x,
801
+ encoder_hidden_states,
802
+ encoder_attn_mask=encoder_padding_mask,
803
+ decoder_padding_mask=decoder_padding_mask,
804
+ layer_state=layer_state,
805
+ causal_mask=decoder_causal_mask,
806
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
807
+ cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
808
+ output_attentions=output_attentions,
809
+ )
810
+
811
+ if use_cache:
812
+ next_decoder_cache.append(layer_past.copy())
813
+
814
+ if output_attentions:
815
+ all_self_attns += (layer_self_attn,)
816
+ all_cross_attns += (layer_cross_attn,)
817
+
818
+ # add hidden states from the last decoder layer
819
+ if output_hidden_states:
820
+ x = x.transpose(0, 1)
821
+ all_hidden_states += (x,)
822
+ x = x.transpose(0, 1)
823
+
824
+ # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
825
+ x = x.transpose(0, 1)
826
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
827
+
828
+ x = self.output_projection(x)
829
+
830
+ next_cache = next_decoder_cache if use_cache else None
831
+
832
+ if not return_dict:
833
+ return tuple(
834
+ v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
835
+ )
836
+ return BaseModelOutputWithPastAndCrossAttentions(
837
+ last_hidden_state=x,
838
+ past_key_values=next_cache,
839
+ hidden_states=all_hidden_states,
840
+ attentions=all_self_attns,
841
+ cross_attentions=all_cross_attns,
842
+ )
843
+
844
+
845
+ def _reorder_buffer(attn_cache, new_order):
846
+ for k, input_buffer_k in attn_cache.items():
847
+ if input_buffer_k is not None:
848
+ attn_cache[k] = input_buffer_k.index_select(0, new_order)
849
+ return attn_cache
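+ # Illustrative sketch (arbitrary shapes): the cached "prev_key" / "prev_value" tensors have
+ # batch (times beams during generation) as dim 0, so e.g. new_order = torch.tensor([2, 0, 1])
+ # re-shuffles the cache rows to follow the surviving beams after each beam-search step.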
850
+
851
+
852
+ class Attention(nn.Module):
853
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
854
+
855
+ def __init__(
856
+ self,
857
+ embed_dim,
858
+ num_heads,
859
+ dropout=0.0,
860
+ bias=True,
861
+ encoder_decoder_attention=False, # otherwise self_attention
862
+ ):
863
+ super().__init__()
864
+ self.embed_dim = embed_dim
865
+ self.num_heads = num_heads
866
+ self.dropout = dropout
867
+ self.head_dim = embed_dim // num_heads
868
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
869
+ self.scaling = self.head_dim**-0.5
870
+
871
+ self.encoder_decoder_attention = encoder_decoder_attention
872
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
873
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
874
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
875
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
876
+ self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
877
+
878
+ def _shape(self, tensor, seq_len, bsz):
879
+ return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
880
+
881
+ def forward(
882
+ self,
883
+ query,
884
+ key: Optional[Tensor],
885
+ key_padding_mask: Optional[Tensor] = None,
886
+ layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
887
+ attn_mask: Optional[Tensor] = None,
888
+ layer_head_mask: Optional[Tensor] = None,
889
+ output_attentions=False,
890
+ ) -> Tuple[Tensor, Optional[Tensor]]:
891
+ """Input shape: Time(SeqLen) x Batch x Channel"""
892
+ static_kv: bool = self.encoder_decoder_attention
893
+ tgt_len, bsz, embed_dim = query.size()
894
+ assert embed_dim == self.embed_dim
895
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
896
+ # get here for encoder-decoder attention because of static_kv
897
+ if layer_state is not None: # reuse k,v and encoder_padding_mask
898
+ saved_state = layer_state.get(self.cache_key, {})
899
+ if "prev_key" in saved_state and static_kv:
900
+ # previous time steps are cached - no need to recompute key and value if they are static
901
+ key = None
902
+ else:
903
+ saved_state = None
904
+ layer_state = {}
905
+
906
+ q = self.q_proj(query) * self.scaling
907
+ if static_kv:
908
+ if key is None:
909
+ k = v = None
910
+ else:
911
+ k = self.k_proj(key)
912
+ v = self.v_proj(key)
913
+ else:
914
+ k = self.k_proj(query)
915
+ v = self.v_proj(query)
916
+
917
+ q = self._shape(q, tgt_len, bsz)
918
+ if k is not None:
919
+ k = self._shape(k, -1, bsz)
920
+ if v is not None:
921
+ v = self._shape(v, -1, bsz)
922
+
923
+ if saved_state is not None:
924
+ k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
925
+
926
+ # Update cache
927
+ layer_state[self.cache_key] = {
928
+ "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
929
+ "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
930
+ "prev_key_padding_mask": key_padding_mask if not static_kv else None,
931
+ }
932
+
933
+ assert k is not None
934
+ src_len = k.size(1)
935
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
936
+ assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
937
+
938
+ if attn_mask is not None:
939
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
940
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
941
+
942
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
943
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
944
+ key_padding_mask = None
945
+ assert key_padding_mask is None or key_padding_mask.size()[:2] == (
946
+ bsz,
947
+ src_len,
948
+ )
949
+
950
+ if key_padding_mask is not None: # don't attend to padding symbols
951
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
952
+ reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
953
+ attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min)
954
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
955
+
956
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
957
+
958
+ if layer_head_mask is not None:
959
+ assert layer_head_mask.size() == (
960
+ self.num_heads,
961
+ ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
962
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
963
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
964
+
965
+ if output_attentions:
966
+ # make sure that attn_weights are included in graph
967
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
968
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
969
+ else:
970
+ attn_weights_reshaped = None
971
+
972
+ attn_probs = nn.functional.dropout(
973
+ attn_weights,
974
+ p=self.dropout,
975
+ training=self.training,
976
+ )
977
+
978
+ assert v is not None
979
+ attn_output = torch.bmm(attn_probs, v)
980
+ assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
981
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
982
+ attn_output = self.out_proj(attn_output)
983
+
984
+ return attn_output, attn_weights_reshaped
985
+
986
+ def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
987
+ # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
988
+ if "prev_key" in saved_state:
989
+ _prev_key = saved_state["prev_key"]
990
+ assert _prev_key is not None
991
+ prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
992
+ if static_kv:
993
+ k = prev_key
994
+ else:
995
+ assert k is not None
996
+ k = torch.cat([prev_key, k], dim=1)
997
+ if "prev_value" in saved_state:
998
+ _prev_value = saved_state["prev_value"]
999
+ assert _prev_value is not None
1000
+ prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
1001
+ if static_kv:
1002
+ v = prev_value
1003
+ else:
1004
+ assert v is not None
1005
+ v = torch.cat([prev_value, v], dim=1)
1006
+ assert k is not None and v is not None
1007
+ prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
1008
+ if prev_key_padding_mask is not None:
1009
+ if static_kv:
1010
+ new_key_padding_mask = prev_key_padding_mask
1011
+ else:
1012
+ new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
1013
+ else:
1014
+ new_key_padding_mask = key_padding_mask
1015
+ return k, v, new_key_padding_mask
1016
+
1017
+
1018
+ def fill_with_neg_inf(t):
1019
+ """FP16-compatible function that fills a input_ids with -inf."""
1020
+ return t.float().fill_(torch.finfo(t.dtype).min).type_as(t)
1021
+
1022
+
1023
+ # Public API
1024
+ def _get_shape(t):
1025
+ return getattr(t, "shape", None)
1026
+
1027
+
1028
+ @add_start_docstrings(
1029
+ "The bare FSMT Model outputting raw hidden-states without any specific head on top.",
1030
+ FSMT_START_DOCSTRING,
1031
+ )
1032
+ class FSMTModel(PretrainedFSMTModel):
1033
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
1034
+
1035
+ def __init__(self, config: FSMTConfig):
1036
+ super().__init__(config)
1037
+
1038
+ padding_idx = config.pad_token_id
1039
+ encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx)
1040
+ decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx)
1041
+
1042
+ self.encoder = FSMTEncoder(config, encoder_embed_tokens)
1043
+ self.decoder = FSMTDecoder(config, decoder_embed_tokens)
1044
+
1045
+ # Initialize weights and apply final processing
1046
+ self.post_init()
1047
+
1048
+ def get_encoder(self):
1049
+ return self.encoder
1050
+
1051
+ def get_decoder(self):
1052
+ return self.decoder
1053
+
1054
+ def _tie_weights(self):
1055
+ if self.config.tie_word_embeddings:
1056
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings())
1057
+ self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings())
1058
+
1059
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
1060
+ @add_code_sample_docstrings(
1061
+ checkpoint=_CHECKPOINT_FOR_DOC,
1062
+ output_type=Seq2SeqModelOutput,
1063
+ config_class=_CONFIG_FOR_DOC,
1064
+ )
1065
+ def forward(
1066
+ self,
1067
+ input_ids: torch.LongTensor,
1068
+ attention_mask: Optional[torch.Tensor] = None,
1069
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1070
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1071
+ head_mask: Optional[torch.Tensor] = None,
1072
+ decoder_head_mask: Optional[torch.Tensor] = None,
1073
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1074
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1075
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1076
+ use_cache: Optional[bool] = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1080
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1081
+ return_dict: Optional[bool] = None,
1082
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
1083
+ if decoder_input_ids is None:
1084
+ use_cache = False
1085
+
1086
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1087
+ output_hidden_states = (
1088
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1089
+ )
1090
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1091
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1092
+
1093
+ # make masks if user doesn't supply
1094
+ if not use_cache and input_ids is not None:
1095
+ decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs(
1096
+ self.config,
1097
+ input_ids,
1098
+ decoder_input_ids=decoder_input_ids,
1099
+ decoder_padding_mask=decoder_attention_mask,
1100
+ causal_mask_dtype=self.decoder.embed_tokens.weight.dtype,
1101
+ )
1102
+ else:
1103
+ decoder_padding_mask, causal_mask = None, None
1104
+
1105
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
1106
+ raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.")
1107
+
1108
+ if encoder_outputs is None:
1109
+ encoder_outputs = self.encoder(
1110
+ input_ids=input_ids,
1111
+ attention_mask=attention_mask,
1112
+ inputs_embeds=inputs_embeds,
1113
+ head_mask=head_mask,
1114
+ output_attentions=output_attentions,
1115
+ output_hidden_states=output_hidden_states,
1116
+ return_dict=return_dict,
1117
+ )
1118
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False
1119
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
1120
+ encoder_outputs = BaseModelOutput(
1121
+ last_hidden_state=encoder_outputs[0],
1122
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
1123
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
1124
+ )
1125
+
1126
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1127
+ decoder_outputs = self.decoder(
1128
+ decoder_input_ids,
1129
+ encoder_outputs[0],
1130
+ attention_mask,
1131
+ decoder_padding_mask,
1132
+ decoder_causal_mask=causal_mask,
1133
+ inputs_embeds=decoder_inputs_embeds,
1134
+ head_mask=decoder_head_mask,
1135
+ cross_attn_head_mask=cross_attn_head_mask,
1136
+ past_key_values=past_key_values,
1137
+ use_cache=use_cache,
1138
+ output_attentions=output_attentions,
1139
+ output_hidden_states=output_hidden_states,
1140
+ return_dict=return_dict,
1141
+ )
1142
+
1143
+ if not return_dict:
1144
+ return decoder_outputs + encoder_outputs
1145
+
1146
+ return Seq2SeqModelOutput(
1147
+ last_hidden_state=decoder_outputs.last_hidden_state,
1148
+ past_key_values=decoder_outputs.past_key_values,
1149
+ decoder_hidden_states=decoder_outputs.hidden_states,
1150
+ decoder_attentions=decoder_outputs.attentions,
1151
+ cross_attentions=decoder_outputs.cross_attentions,
1152
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
1153
+ encoder_hidden_states=encoder_outputs.hidden_states,
1154
+ encoder_attentions=encoder_outputs.attentions,
1155
+ )
1156
+
1157
+ def get_input_embeddings(self):
1158
+ return self.encoder.embed_tokens
1159
+
1160
+ def set_input_embeddings(self, value):
1161
+ self.encoder.embed_tokens = value
1162
+
1163
+ def get_output_embeddings(self):
1164
+ return self.decoder.embed_tokens
1165
+
1166
+ def set_output_embeddings(self, value):
1167
+ self.decoder.embed_tokens = value
1168
+
1169
+
1170
+ @add_start_docstrings(
1171
+ "The FSMT Model with a language modeling head. Can be used for summarization.", FSMT_START_DOCSTRING
1172
+ )
1173
+ class FSMTForConditionalGeneration(PretrainedFSMTModel):
1174
+ base_model_prefix = "model"
1175
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
1176
+
1177
+ def __init__(self, config: FSMTConfig):
1178
+ super().__init__(config)
1179
+ base_model = FSMTModel(config)
1180
+ self.model = base_model
1181
+
1182
+ # Initialize weights and apply final processing
1183
+ self.post_init()
1184
+
1185
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
1186
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
1187
+ @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
1188
+ def forward(
1189
+ self,
1190
+ input_ids: torch.LongTensor,
1191
+ attention_mask: Optional[torch.Tensor] = None,
1192
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1193
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
1194
+ head_mask: Optional[torch.Tensor] = None,
1195
+ decoder_head_mask: Optional[torch.Tensor] = None,
1196
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
1197
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
1198
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1199
+ inputs_embeds: Optional[torch.Tensor] = None,
1200
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
1201
+ labels: Optional[torch.LongTensor] = None,
1202
+ use_cache: Optional[bool] = None,
1203
+ output_attentions: Optional[bool] = None,
1204
+ output_hidden_states: Optional[bool] = None,
1205
+ return_dict: Optional[bool] = None,
1206
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
1207
+ r"""
1208
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1209
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1210
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1211
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1212
+
1213
+ Returns:
1214
+
1215
+ """
1216
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1217
+
1218
+ if labels is not None:
1219
+ use_cache = False
1220
+
1221
+ outputs = self.model(
1222
+ input_ids,
1223
+ inputs_embeds=inputs_embeds,
1224
+ attention_mask=attention_mask,
1225
+ decoder_input_ids=decoder_input_ids,
1226
+ decoder_inputs_embeds=decoder_inputs_embeds,
1227
+ encoder_outputs=encoder_outputs,
1228
+ decoder_attention_mask=decoder_attention_mask,
1229
+ head_mask=head_mask,
1230
+ decoder_head_mask=decoder_head_mask,
1231
+ cross_attn_head_mask=cross_attn_head_mask,
1232
+ past_key_values=past_key_values,
1233
+ use_cache=use_cache,
1234
+ output_attentions=output_attentions,
1235
+ output_hidden_states=output_hidden_states,
1236
+ return_dict=return_dict,
1237
+ )
1238
+ lm_logits = outputs[0]
1239
+
1240
+ masked_lm_loss = None
1241
+ if labels is not None:
1242
+ loss_fct = CrossEntropyLoss()
1243
+ # TODO(SS): do we need to ignore pad tokens in labels?
1244
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))
1245
+
1246
+ if not return_dict:
1247
+ output = (lm_logits,) + outputs[1:]
1248
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1249
+
1250
+ return Seq2SeqLMOutput(
1251
+ loss=masked_lm_loss,
1252
+ logits=lm_logits,
1253
+ past_key_values=outputs.past_key_values,
1254
+ decoder_hidden_states=outputs.decoder_hidden_states,
1255
+ decoder_attentions=outputs.decoder_attentions,
1256
+ cross_attentions=outputs.cross_attentions,
1257
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
1258
+ encoder_hidden_states=outputs.encoder_hidden_states,
1259
+ encoder_attentions=outputs.encoder_attentions,
1260
+ )
1261
+
1262
+ def prepare_inputs_for_generation(
1263
+ self,
1264
+ decoder_input_ids,
1265
+ past_key_values=None,
1266
+ attention_mask=None,
1267
+ head_mask=None,
1268
+ decoder_head_mask=None,
1269
+ cross_attn_head_mask=None,
1270
+ use_cache=None,
1271
+ encoder_outputs=None,
1272
+ **kwargs,
1273
+ ):
1274
+ return {
1275
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
1276
+ "encoder_outputs": encoder_outputs,
1277
+ "past_key_values": past_key_values,
1278
+ "decoder_input_ids": decoder_input_ids,
1279
+ "attention_mask": attention_mask,
1280
+ "head_mask": head_mask,
1281
+ "decoder_head_mask": decoder_head_mask,
1282
+ "cross_attn_head_mask": cross_attn_head_mask,
1283
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
1284
+ }
1285
+
1286
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1287
+ return shift_tokens_right(labels, self.config.pad_token_id)
1288
+
1289
+ @staticmethod
1290
+ def _reorder_cache(past_key_values, beam_idx):
1291
+ reordered_past = []
1292
+ for layer_past in past_key_values:
1293
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
1294
+ layer_past_new = {
1295
+ attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
1296
+ }
1297
+ reordered_past.append(layer_past_new)
1298
+ return reordered_past
1299
+
1300
+ def get_encoder(self):
1301
+ return self.model.encoder
1302
+
1303
+ def get_decoder(self):
1304
+ return self.model.decoder
1305
+
1306
+ def get_output_embeddings(self):
1307
+ return self.model.decoder.embed_tokens
1308
+
1309
+ def set_output_embeddings(self, value):
1310
+ self.model.decoder.embed_tokens = value
1311
+
1312
+
1313
+ class SinusoidalPositionalEmbedding(nn.Embedding):
1314
+ """
1315
+ This module produces sinusoidal positional embeddings of any length.
1316
+
1317
+ We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.
1318
+
1319
+ Padding symbols are ignored.
1320
+
1321
+ These embeddings get automatically extended in forward if more positions are needed.
1322
+ """
1323
+
1324
+ def __init__(self, num_positions, embedding_dim, padding_idx):
1325
+ self.make_weight(num_positions, embedding_dim, padding_idx)
1326
+
1327
+ def make_weight(self, num_positions, embedding_dim, padding_idx):
1328
+ weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
1329
+ if not hasattr(self, "weight"):
1330
+ # in __init__
1331
+ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight)
1332
+ else:
1333
+ # in forward put the weights on the correct dtype and device of the param
1334
+ weight = weight.to(dtype=self.weight.dtype, device=self.weight.device)
1335
+ self.weight = nn.Parameter(weight)
1336
+ self.weight.detach_()
1337
+ self.weight.requires_grad = False
1338
+
1339
+ @staticmethod
1340
+ def get_embedding(num_embeddings, embedding_dim, padding_idx):
1341
+ """
1342
+ Build sinusoidal embeddings.
1343
+
1344
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
1345
+ "Attention Is All You Need".
1346
+ """
1347
+ half_dim = embedding_dim // 2
1348
+ emb = math.log(10000) / (half_dim - 1)
1349
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
1350
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
1351
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
1352
+ if embedding_dim % 2 == 1:
1353
+ # zero pad
1354
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
1355
+ if padding_idx is not None:
1356
+ emb[padding_idx, :] = 0
1357
+ return emb
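+ # Illustrative sketch (arbitrary small size): with embedding_dim = 4 the row for position p
+ # is [sin(p), sin(p / 10000), cos(p), cos(p / 10000)] -- all sines first, then all cosines,
+ # which is the tensor2tensor layout rather than the interleaved sin/cos layout described in
+ # the original paper.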
1358
+
1359
+ @staticmethod
1360
+ def make_positions(tensor, padding_idx: int):
1361
+ """
1362
+ Replace non-padding symbols with their position numbers.
1363
+
1364
+ Position numbers begin at padding_idx+1. Padding symbols are ignored.
1365
+ """
1366
+ # The series of casts and type-conversions here are carefully
1367
+ # balanced to both work with ONNX export and XLA. In particular XLA
1368
+ # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
1369
+ # how to handle the dtype kwarg in cumsum.
1370
+ mask = tensor.ne(padding_idx).int()
1371
+ return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
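+ # Illustrative sketch (assuming padding_idx == 1; values are arbitrary):
+ # [[7, 8, 9, 1, 1]] -> [[2, 3, 4, 1, 1]], i.e. real tokens get positions 2, 3, 4, ...
+ # (starting at padding_idx + 1) while pad positions are pinned to padding_idx itself.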
1372
+
1373
+ def forward(
1374
+ self,
1375
+ input,
1376
+ incremental_state: Optional[Any] = None,
1377
+ timestep: Optional[Tensor] = None,
1378
+ ):
1379
+ """Input is expected to be of size [bsz x seqlen]."""
1380
+ bsz, seq_len = input.shape[:2]
1381
+ max_pos = self.padding_idx + 1 + seq_len
1382
+ if max_pos > self.weight.size(0):
1383
+ # expand embeddings if needed
1384
+ self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
1385
+ positions = self.make_positions(input, self.padding_idx)
1386
+ return super().forward(positions)
llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py ADDED
@@ -0,0 +1,519 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for FSMT."""
16
+
17
+
18
+ import json
19
+ import os
20
+ import re
21
+ import unicodedata
22
+ from typing import Dict, List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {
31
+ "src_vocab_file": "vocab-src.json",
32
+ "tgt_vocab_file": "vocab-tgt.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+
37
+ def get_pairs(word):
38
+ """
39
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
40
+ strings)
41
+ """
42
+ pairs = set()
43
+ prev_char = word[0]
44
+ for char in word[1:]:
45
+ pairs.add((prev_char, char))
46
+ prev_char = char
47
+ return pairs
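+ # Illustrative sketch (arbitrary example): for the BPE word ("l", "o", "w", "e", "r</w>")
+ # this returns {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")} -- the adjacent symbol
+ # pairs that the BPE merge loop repeatedly ranks and merges.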
48
+
49
+
50
+ def replace_unicode_punct(text):
51
+ """
52
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
53
+ """
54
+ text = text.replace(",", ",")
55
+ text = re.sub(r"。\s*", ". ", text)
56
+ text = text.replace("、", ",")
57
+ text = text.replace("”", '"')
58
+ text = text.replace("“", '"')
59
+ text = text.replace("∶", ":")
60
+ text = text.replace(":", ":")
61
+ text = text.replace("?", "?")
62
+ text = text.replace("《", '"')
63
+ text = text.replace("》", '"')
64
+ text = text.replace(")", ")")
65
+ text = text.replace("!", "!")
66
+ text = text.replace("(", "(")
67
+ text = text.replace(";", ";")
68
+ text = text.replace("1", "1")
69
+ text = text.replace("」", '"')
70
+ text = text.replace("「", '"')
71
+ text = text.replace("0", "0")
72
+ text = text.replace("3", "3")
73
+ text = text.replace("2", "2")
74
+ text = text.replace("5", "5")
75
+ text = text.replace("6", "6")
76
+ text = text.replace("9", "9")
77
+ text = text.replace("7", "7")
78
+ text = text.replace("8", "8")
79
+ text = text.replace("4", "4")
80
+ text = re.sub(r".\s*", ". ", text)
81
+ text = text.replace("~", "~")
82
+ text = text.replace("’", "'")
83
+ text = text.replace("…", "...")
84
+ text = text.replace("━", "-")
85
+ text = text.replace("〈", "<")
86
+ text = text.replace("〉", ">")
87
+ text = text.replace("【", "[")
88
+ text = text.replace("】", "]")
89
+ text = text.replace("%", "%")
90
+ return text
91
+
92
+
93
+ def remove_non_printing_char(text):
94
+ """
95
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
96
+ """
97
+ output = []
98
+ for char in text:
99
+ cat = unicodedata.category(char)
100
+ if cat.startswith("C"):
101
+ continue
102
+ output.append(char)
103
+ return "".join(output)
104
+
105
+
106
+ # Porting notes:
107
+ # this one is modeled after XLMTokenizer
108
+ #
109
+ # added:
110
+ # - src_vocab_file,
111
+ # - tgt_vocab_file,
112
+ # - langs,
113
+
114
+
115
+ class FSMTTokenizer(PreTrainedTokenizer):
116
+ """
117
+ Construct a FAIRSEQ Transformer tokenizer, based on Byte-Pair Encoding (BPE). The tokenization process is the following:
118
+
119
+ - Moses preprocessing and tokenization.
120
+ - Normalizing all input text.
121
+ - The argument `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
+ "__classify__") to a vocabulary.
123
+ - The argument `langs` defines a pair of languages.
124
+
125
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
126
+ this superclass for more information regarding those methods.
127
+
128
+ Args:
129
+ langs (`List[str]`, *optional*):
130
+ A list of two languages to translate from and to, for instance `["en", "ru"]`.
131
+ src_vocab_file (`str`, *optional*):
132
+ File containing the vocabulary for the source language.
133
+ tgt_vocab_file (`str`, *optional*):
134
+ File containing the vocabulary for the target language.
135
+ merges_file (`str`, *optional*):
136
+ File containing the merges.
137
+ do_lower_case (`bool`, *optional*, defaults to `False`):
138
+ Whether or not to lowercase the input when tokenizing.
139
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
140
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
141
+ token instead.
142
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
143
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
144
+
145
+ <Tip>
146
+
147
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
148
+ sequence. The token used is the `cls_token`.
149
+
150
+ </Tip>
151
+
152
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
153
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
154
+ sequence classification or for a text and a question for question answering. It is also used as the last
155
+ token of a sequence built with special tokens.
156
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
157
+ The token used for padding, for example when batching sequences of different lengths.
158
+
159
+ """
160
+
161
+ vocab_files_names = VOCAB_FILES_NAMES
162
+ model_input_names = ["input_ids", "attention_mask"]
163
+
164
+ def __init__(
165
+ self,
166
+ langs=None,
167
+ src_vocab_file=None,
168
+ tgt_vocab_file=None,
169
+ merges_file=None,
170
+ do_lower_case=False,
171
+ unk_token="<unk>",
172
+ bos_token="<s>",
173
+ sep_token="</s>",
174
+ pad_token="<pad>",
175
+ **kwargs,
176
+ ):
177
+ try:
178
+ import sacremoses
179
+ except ImportError:
180
+ raise ImportError(
181
+ "You need to install sacremoses to use FSMTTokenizer. "
182
+ "See https://pypi.org/project/sacremoses/ for installation."
183
+ )
184
+
185
+ self.sm = sacremoses
186
+
187
+ self.src_vocab_file = src_vocab_file
188
+ self.tgt_vocab_file = tgt_vocab_file
189
+ self.merges_file = merges_file
190
+ self.do_lower_case = do_lower_case
191
+
192
+ # cache of sm.MosesPunctNormalizer instance
193
+ self.cache_moses_punct_normalizer = {}
194
+ # cache of sm.MosesTokenizer instance
195
+ self.cache_moses_tokenizer = {}
196
+ self.cache_moses_detokenizer = {}
197
+
198
+ if langs and len(langs) == 2:
199
+ self.src_lang, self.tgt_lang = langs
200
+ else:
201
+ raise ValueError(
202
+ f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
203
+ "Usually that means that tokenizer can't find a mapping for the given model path "
204
+ "in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer."
205
+ )
206
+
207
+ with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
208
+ self.encoder = json.load(src_vocab_handle)
209
+ with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
210
+ tgt_vocab = json.load(tgt_vocab_handle)
211
+ self.decoder = {v: k for k, v in tgt_vocab.items()}
212
+ with open(merges_file, encoding="utf-8") as merges_handle:
213
+ merges = merges_handle.read().split("\n")[:-1]
214
+ merges = [tuple(merge.split()[:2]) for merge in merges]
215
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
216
+ self.cache = {}
217
+ super().__init__(
218
+ langs=langs,
219
+ src_vocab_file=src_vocab_file,
220
+ tgt_vocab_file=tgt_vocab_file,
221
+ merges_file=merges_file,
222
+ do_lower_case=do_lower_case,
223
+ unk_token=unk_token,
224
+ bos_token=bos_token,
225
+ sep_token=sep_token,
226
+ pad_token=pad_token,
227
+ **kwargs,
228
+ )
229
+
230
+ # hack override
231
+ def get_vocab(self) -> Dict[str, int]:
232
+ return self.get_src_vocab()
233
+
234
+ # hack override
235
+ @property
236
+ def vocab_size(self) -> int:
237
+ return self.src_vocab_size
238
+
239
+ def moses_punct_norm(self, text, lang):
240
+ if lang not in self.cache_moses_punct_normalizer:
241
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
242
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
243
+ return self.cache_moses_punct_normalizer[lang].normalize(text)
244
+
245
+ def moses_tokenize(self, text, lang):
246
+ if lang not in self.cache_moses_tokenizer:
247
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
248
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
249
+ return self.cache_moses_tokenizer[lang].tokenize(
250
+ text, aggressive_dash_splits=True, return_str=False, escape=True
251
+ )
252
+
253
+ def moses_detokenize(self, tokens, lang):
254
+ if lang not in self.cache_moses_detokenizer:
255
+ moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
256
+ self.cache_moses_detokenizer[lang] = moses_detokenizer
257
+ return self.cache_moses_detokenizer[lang].detokenize(tokens)
258
+
259
+ def moses_pipeline(self, text, lang):
260
+ text = replace_unicode_punct(text)
261
+ text = self.moses_punct_norm(text, lang)
262
+ text = remove_non_printing_char(text)
263
+ return text
264
+
265
+ @property
266
+ def src_vocab_size(self):
267
+ return len(self.encoder)
268
+
269
+ @property
270
+ def tgt_vocab_size(self):
271
+ return len(self.decoder)
272
+
273
+ def get_src_vocab(self):
274
+ return dict(self.encoder, **self.added_tokens_encoder)
275
+
276
+ def get_tgt_vocab(self):
277
+ return dict(self.decoder, **self.added_tokens_decoder)
278
+
279
+ def bpe(self, token):
280
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
281
+ if token in self.cache:
282
+ return self.cache[token]
283
+ pairs = get_pairs(word)
284
+
285
+ if not pairs:
286
+ return token + "</w>"
287
+
288
+ while True:
289
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
290
+ if bigram not in self.bpe_ranks:
291
+ break
292
+ first, second = bigram
293
+ new_word = []
294
+ i = 0
295
+ while i < len(word):
296
+ try:
297
+ j = word.index(first, i)
298
+ except ValueError:
299
+ new_word.extend(word[i:])
300
+ break
301
+ else:
302
+ new_word.extend(word[i:j])
303
+ i = j
304
+
305
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
306
+ new_word.append(first + second)
307
+ i += 2
308
+ else:
309
+ new_word.append(word[i])
310
+ i += 1
311
+ new_word = tuple(new_word)
312
+ word = new_word
313
+ if len(word) == 1:
314
+ break
315
+ else:
316
+ pairs = get_pairs(word)
317
+ word = " ".join(word)
318
+ if word == "\n </w>":
319
+ word = "\n</w>"
320
+ self.cache[token] = word
321
+ return word
322
+
323
+ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
324
+ """
325
+ Tokenize a string given language code using Moses.
326
+
327
+ Details of tokenization:
328
+
329
+ - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
330
+ - Install with `pip install sacremoses`
331
+
332
+ Args:
333
+ - lang: ISO language code (default = 'en') (string). Languages should be among the model's supported
334
+ languages. However, we don't enforce it.
335
+ - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
336
+ (bool). If True, we only apply BPE.
337
+
338
+ Returns:
339
+ List of tokens.
340
+ """
341
+ # ignore `lang`, which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
342
+ # if lang != self.src_lang:
343
+ # raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
344
+ lang = self.src_lang
345
+
346
+ if self.do_lower_case:
347
+ text = text.lower()
348
+
349
+ if bypass_tokenizer:
350
+ text = text.split()
351
+ else:
352
+ text = self.moses_pipeline(text, lang=lang)
353
+ text = self.moses_tokenize(text, lang=lang)
354
+
355
+ split_tokens = []
356
+ for token in text:
357
+ if token:
358
+ split_tokens.extend(list(self.bpe(token).split(" ")))
359
+
360
+ return split_tokens
361
+
362
+ def _convert_token_to_id(self, token):
363
+ """Converts a token (str) in an id using the vocab."""
364
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
365
+
366
+ def _convert_id_to_token(self, index):
367
+ """Converts an index (integer) in a token (str) using the vocab."""
368
+ return self.decoder.get(index, self.unk_token)
369
+
370
+ def convert_tokens_to_string(self, tokens):
371
+ """Converts a sequence of tokens (string) in a single string."""
372
+
373
+ # remove BPE
374
+ tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
375
+ tokens = "".join(tokens).split()
376
+ # detokenize
377
+ text = self.moses_detokenize(tokens, self.tgt_lang)
378
+ return text
379
+
380
+ def build_inputs_with_special_tokens(
381
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
382
+ ) -> List[int]:
383
+ """
384
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
385
+ adding special tokens. A FAIRSEQ Transformer sequence has the following format:
386
+
387
+ - single sequence: `<s> X </s>`
388
+ - pair of sequences: `<s> A </s> B </s>`
389
+
390
+ Args:
391
+ token_ids_0 (`List[int]`):
392
+ List of IDs to which the special tokens will be added.
393
+ token_ids_1 (`List[int]`, *optional*):
394
+ Optional second list of IDs for sequence pairs.
395
+
396
+ Returns:
397
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
398
+ """
399
+ sep = [self.sep_token_id]
400
+
401
+ # no bos used in fairseq
402
+ if token_ids_1 is None:
403
+ return token_ids_0 + sep
404
+ return token_ids_0 + sep + token_ids_1 + sep
405
+
406
+ def get_special_tokens_mask(
407
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
408
+ ) -> List[int]:
409
+ """
410
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
411
+ special tokens using the tokenizer `prepare_for_model` method.
412
+
413
+ Args:
414
+ token_ids_0 (`List[int]`):
415
+ List of IDs.
416
+ token_ids_1 (`List[int]`, *optional*):
417
+ Optional second list of IDs for sequence pairs.
418
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
419
+ Whether or not the token list is already formatted with special tokens for the model.
420
+
421
+ Returns:
422
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
423
+ """
424
+
425
+ if already_has_special_tokens:
426
+ return super().get_special_tokens_mask(
427
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
428
+ )
429
+ # no bos used in fairseq
430
+ if token_ids_1 is not None:
431
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
432
+ return ([0] * len(token_ids_0)) + [1]
433
+
434
+ def create_token_type_ids_from_sequences(
435
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
436
+ ) -> List[int]:
437
+ """
438
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
439
+ Transformer sequence pair mask has the following format:
440
+
441
+ ```
442
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
443
+ | first sequence | second sequence |
444
+ ```
445
+
446
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
447
+
448
+ Args:
449
+ token_ids_0 (`List[int]`):
450
+ List of IDs.
451
+ token_ids_1 (`List[int]`, *optional*):
452
+ Optional second list of IDs for sequence pairs.
453
+
454
+ Returns:
455
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
456
459
+ """
460
+ sep = [self.sep_token_id]
461
+
462
+ # no bos used in fairseq
463
+ if token_ids_1 is None:
464
+ return len(token_ids_0 + sep) * [0]
465
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
466
+
467
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
468
+ if not os.path.isdir(save_directory):
469
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
470
+ return
471
+
472
+ src_vocab_file = os.path.join(
473
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
474
+ )
475
+ tgt_vocab_file = os.path.join(
476
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
477
+ )
478
+ merges_file = os.path.join(
479
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
480
+ )
481
+
482
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
483
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
484
+
485
+ with open(tgt_vocab_file, "w", encoding="utf-8") as f:
486
+ tgt_vocab = {v: k for k, v in self.decoder.items()}
487
+ f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
488
+
489
+ index = 0
490
+ with open(merges_file, "w", encoding="utf-8") as writer:
491
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
492
+ if index != token_index:
493
+ logger.warning(
494
+ f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
495
+ " Please check that the tokenizer is not corrupted!"
496
+ )
497
+ index = token_index
498
+ writer.write(" ".join(bpe_tokens) + "\n")
499
+ index += 1
500
+
501
+ return src_vocab_file, tgt_vocab_file, merges_file
502
+
503
+ def __getstate__(self):
504
+ state = self.__dict__.copy()
505
+ state["sm"] = None
506
+ return state
507
+
508
+ def __setstate__(self, d):
509
+ self.__dict__ = d
510
+
511
+ try:
512
+ import sacremoses
513
+ except ImportError:
514
+ raise ImportError(
515
+ "You need to install sacremoses to use FSMTTokenizer. "
516
+ "See https://pypi.org/project/sacremoses/ for installation."
517
+ )
518
+
519
+ self.sm = sacremoses
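A minimal usage sketch for the tokenizer above (editor's addition; it assumes the publicly released `facebook/wmt19-en-ru` checkpoint and network access to download it):

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru")

# Source side: Moses preprocessing + BPE, then `</s>` is appended (no BOS in FAIRSEQ).
inputs = tokenizer("Machine learning is great", return_tensors="pt")

# Target side: generated ids are looked up in the target vocab (`self.decoder`) and
# detokenized with the Russian Moses detokenizer via `convert_tokens_to_string`.
generated = model.generate(**inputs)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```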
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__init__.py ADDED
@@ -0,0 +1,45 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
18
+
19
+
20
+ _import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
21
+
22
+ try:
23
+ if not is_tokenizers_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
29
+
30
+
31
+ if TYPE_CHECKING:
32
+ from .tokenization_herbert import HerbertTokenizer
33
+
34
+ try:
35
+ if not is_tokenizers_available():
36
+ raise OptionalDependencyNotAvailable()
37
+ except OptionalDependencyNotAvailable:
38
+ pass
39
+ else:
40
+ from .tokenization_herbert_fast import HerbertTokenizerFast
41
+
42
+ else:
43
+ import sys
44
+
45
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
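The `_LazyModule` indirection above defers the actual tokenizer imports until first attribute access. A small sketch of the user-facing effect (editor's addition):

```python
import transformers

# Accessing the attribute triggers the lazy import of tokenization_herbert.
print(transformers.HerbertTokenizer.__name__)  # -> "HerbertTokenizer"
```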
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (784 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc ADDED
Binary file (18.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc ADDED
Binary file (5.43 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py ADDED
@@ -0,0 +1,644 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import json
16
+ import os
17
+ import re
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ }
31
+
32
+
33
+ # Copied from transformers.models.xlm.tokenization_xlm.get_pairs
34
+ def get_pairs(word):
35
+ """
36
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
37
+ strings)
38
+ """
39
+ pairs = set()
40
+ prev_char = word[0]
41
+ for char in word[1:]:
42
+ pairs.add((prev_char, char))
43
+ prev_char = char
44
+ return pairs
45
+
46
+
47
+ # Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
48
+ def replace_unicode_punct(text):
49
+ """
50
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
51
+ """
52
+ text = text.replace(",", ",")
53
+ text = re.sub(r"。\s*", ". ", text)
54
+ text = text.replace("、", ",")
55
+ text = text.replace("”", '"')
56
+ text = text.replace("“", '"')
57
+ text = text.replace("∶", ":")
58
+ text = text.replace(":", ":")
59
+ text = text.replace("?", "?")
60
+ text = text.replace("《", '"')
61
+ text = text.replace("》", '"')
62
+ text = text.replace(")", ")")
63
+ text = text.replace("!", "!")
64
+ text = text.replace("(", "(")
65
+ text = text.replace(";", ";")
66
+ text = text.replace("1", "1")
67
+ text = text.replace("」", '"')
68
+ text = text.replace("「", '"')
69
+ text = text.replace("0", "0")
70
+ text = text.replace("3", "3")
71
+ text = text.replace("2", "2")
72
+ text = text.replace("5", "5")
73
+ text = text.replace("6", "6")
74
+ text = text.replace("9", "9")
75
+ text = text.replace("7", "7")
76
+ text = text.replace("8", "8")
77
+ text = text.replace("4", "4")
78
+ text = re.sub(r".\s*", ". ", text)
79
+ text = text.replace("~", "~")
80
+ text = text.replace("’", "'")
81
+ text = text.replace("…", "...")
82
+ text = text.replace("━", "-")
83
+ text = text.replace("〈", "<")
84
+ text = text.replace("〉", ">")
85
+ text = text.replace("【", "[")
86
+ text = text.replace("】", "]")
87
+ text = text.replace("%", "%")
88
+ return text
89
+
90
+
91
+ # Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
92
+ def remove_non_printing_char(text):
93
+ """
94
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
95
+ """
96
+ output = []
97
+ for char in text:
98
+ cat = unicodedata.category(char)
99
+ if cat.startswith("C"):
100
+ continue
101
+ output.append(char)
102
+ return "".join(output)
103
+
104
+
105
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
106
+ def whitespace_tokenize(text):
107
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
108
+ text = text.strip()
109
+ if not text:
110
+ return []
111
+ tokens = text.split()
112
+ return tokens
113
+
114
+
115
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
116
+ class BasicTokenizer(object):
117
+ """
118
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
119
+
120
+ Args:
121
+ do_lower_case (`bool`, *optional*, defaults to `True`):
122
+ Whether or not to lowercase the input when tokenizing.
123
+ never_split (`Iterable`, *optional*):
124
+ Collection of tokens which will never be split during tokenization. Only has an effect when
125
+ `do_basic_tokenize=True`
126
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
127
+ Whether or not to tokenize Chinese characters.
128
+
129
+ This should likely be deactivated for Japanese (see this
130
+ [issue](https://github.com/huggingface/transformers/issues/328)).
131
+ strip_accents (`bool`, *optional*):
132
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
133
+ value for `lowercase` (as in the original BERT).
134
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
135
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
136
+ the full context of the words, such as contractions.
137
+ """
138
+
139
+ def __init__(
140
+ self,
141
+ do_lower_case=True,
142
+ never_split=None,
143
+ tokenize_chinese_chars=True,
144
+ strip_accents=None,
145
+ do_split_on_punc=True,
146
+ ):
147
+ if never_split is None:
148
+ never_split = []
149
+ self.do_lower_case = do_lower_case
150
+ self.never_split = set(never_split)
151
+ self.tokenize_chinese_chars = tokenize_chinese_chars
152
+ self.strip_accents = strip_accents
153
+ self.do_split_on_punc = do_split_on_punc
154
+
155
+ def tokenize(self, text, never_split=None):
156
+ """
157
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
158
+
159
+ Args:
160
+ never_split (`List[str]`, *optional*):
161
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
162
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
163
+ """
164
+ # union() returns a new set by concatenating the two sets.
165
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
166
+ text = self._clean_text(text)
167
+
168
+ # This was added on November 1st, 2018 for the multilingual and Chinese
169
+ # models. This is also applied to the English models now, but it doesn't
170
+ # matter since the English models were not trained on any Chinese data
171
+ # and generally don't have any Chinese data in them (there are Chinese
172
+ # characters in the vocabulary because Wikipedia does have some Chinese
173
+ # words in the English Wikipedia.).
174
+ if self.tokenize_chinese_chars:
175
+ text = self._tokenize_chinese_chars(text)
176
+ # prevents treating the same character with different unicode codepoints as different characters
177
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
178
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
179
+ split_tokens = []
180
+ for token in orig_tokens:
181
+ if token not in never_split:
182
+ if self.do_lower_case:
183
+ token = token.lower()
184
+ if self.strip_accents is not False:
185
+ token = self._run_strip_accents(token)
186
+ elif self.strip_accents:
187
+ token = self._run_strip_accents(token)
188
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
189
+
190
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
191
+ return output_tokens
192
+
193
+ def _run_strip_accents(self, text):
194
+ """Strips accents from a piece of text."""
195
+ text = unicodedata.normalize("NFD", text)
196
+ output = []
197
+ for char in text:
198
+ cat = unicodedata.category(char)
199
+ if cat == "Mn":
200
+ continue
201
+ output.append(char)
202
+ return "".join(output)
203
+
204
+ def _run_split_on_punc(self, text, never_split=None):
205
+ """Splits punctuation on a piece of text."""
206
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
207
+ return [text]
208
+ chars = list(text)
209
+ i = 0
210
+ start_new_word = True
211
+ output = []
212
+ while i < len(chars):
213
+ char = chars[i]
214
+ if _is_punctuation(char):
215
+ output.append([char])
216
+ start_new_word = True
217
+ else:
218
+ if start_new_word:
219
+ output.append([])
220
+ start_new_word = False
221
+ output[-1].append(char)
222
+ i += 1
223
+
224
+ return ["".join(x) for x in output]
225
+
226
+ def _tokenize_chinese_chars(self, text):
227
+ """Adds whitespace around any CJK character."""
228
+ output = []
229
+ for char in text:
230
+ cp = ord(char)
231
+ if self._is_chinese_char(cp):
232
+ output.append(" ")
233
+ output.append(char)
234
+ output.append(" ")
235
+ else:
236
+ output.append(char)
237
+ return "".join(output)
238
+
239
+ def _is_chinese_char(self, cp):
240
+ """Checks whether CP is the codepoint of a CJK character."""
241
+ # This defines a "chinese character" as anything in the CJK Unicode block:
242
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
243
+ #
244
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
245
+ # despite its name. The modern Korean Hangul alphabet is a different block,
246
+ # as are Japanese Hiragana and Katakana. Those alphabets are used to write
247
+ # space-separated words, so they are not treated specially and handled
248
+ # like all of the other languages.
249
+ if (
250
+ (cp >= 0x4E00 and cp <= 0x9FFF)
251
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
252
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
253
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
254
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
255
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
256
+ or (cp >= 0xF900 and cp <= 0xFAFF)
257
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
258
+ ): #
259
+ return True
260
+
261
+ return False
262
+
263
+ def _clean_text(self, text):
264
+ """Performs invalid character removal and whitespace cleanup on text."""
265
+ output = []
266
+ for char in text:
267
+ cp = ord(char)
268
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
269
+ continue
270
+ if _is_whitespace(char):
271
+ output.append(" ")
272
+ else:
273
+ output.append(char)
274
+ return "".join(output)
275
+
276
+
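# Editor's note (illustrative sketch, not part of the upstream file): `BasicTokenizer`
# is the whitespace/punctuation pre-tokenizer borrowed from BERT. For example,
#
#     BasicTokenizer(do_lower_case=False).tokenize("Hello, world!")
#
# yields ["Hello", ",", "world", "!"]; HerbertTokenizer below feeds each such piece
# to BPE.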
277
+ class HerbertTokenizer(PreTrainedTokenizer):
278
+ """
279
+ Construct a BPE tokenizer for HerBERT.
280
+
281
+ Peculiarities:
282
+
283
+ - uses BERT's pre-tokenizer: BasicTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a
284
+ punctuation character will be treated separately.
285
+
286
+ - Such pretokenized input is BPE subtokenized
287
+
288
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
289
+ superclass for more information regarding methods.
290
+ """
291
+
292
+ vocab_files_names = VOCAB_FILES_NAMES
293
+
294
+ def __init__(
295
+ self,
296
+ vocab_file,
297
+ merges_file,
298
+ tokenizer_file=None,
299
+ cls_token="<s>",
300
+ unk_token="<unk>",
301
+ pad_token="<pad>",
302
+ mask_token="<mask>",
303
+ sep_token="</s>",
304
+ bos_token="<s>",
305
+ do_lowercase_and_remove_accent=False,
306
+ additional_special_tokens=[
307
+ "<special0>",
308
+ "<special1>",
309
+ "<special2>",
310
+ "<special3>",
311
+ "<special4>",
312
+ "<special5>",
313
+ "<special6>",
314
+ "<special7>",
315
+ "<special8>",
316
+ "<special9>",
317
+ ],
318
+ lang2id=None,
319
+ id2lang=None,
320
+ **kwargs,
321
+ ):
322
+ try:
323
+ import sacremoses
324
+ except ImportError:
325
+ raise ImportError(
326
+ "You need to install sacremoses to use HerbertTokenizer. "
327
+ "See https://pypi.org/project/sacremoses/ for installation."
328
+ )
329
+
330
+ self.sm = sacremoses
331
+
332
+ # cache of sm.MosesPunctNormalizer instance
333
+ self.cache_moses_punct_normalizer = {}
334
+ # cache of sm.MosesTokenizer instance
335
+ self.cache_moses_tokenizer = {}
336
+ self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
337
+ # True for current supported model (v1.2.0), False for XLM-17 & 100
338
+ self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
339
+ self.lang2id = lang2id
340
+ self.id2lang = id2lang
341
+ if lang2id is not None and id2lang is not None:
342
+ assert len(lang2id) == len(id2lang)
343
+
344
+ self.ja_word_tokenizer = None
345
+ self.zh_word_tokenizer = None
346
+
347
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
348
+ self.encoder = json.load(vocab_handle)
349
+ self.decoder = {v: k for k, v in self.encoder.items()}
350
+ with open(merges_file, encoding="utf-8") as merges_handle:
351
+ merges = merges_handle.read().split("\n")[:-1]
352
+ merges = [tuple(merge.split()[:2]) for merge in merges]
353
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
354
+ self.cache = {}
355
+
356
+ super().__init__(
357
+ unk_token=unk_token,
358
+ bos_token=bos_token,
359
+ sep_token=sep_token,
360
+ pad_token=pad_token,
361
+ cls_token=cls_token,
362
+ mask_token=mask_token,
363
+ additional_special_tokens=additional_special_tokens,
364
+ lang2id=lang2id,
365
+ id2lang=id2lang,
366
+ do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
367
+ tokenizer_file=None,
368
+ **kwargs,
369
+ )
370
+
371
+ self.bert_pre_tokenizer = BasicTokenizer(
372
+ do_lower_case=False,
373
+ never_split=self.all_special_tokens,
374
+ tokenize_chinese_chars=False,
375
+ strip_accents=False,
376
+ )
377
+
378
+ @property
379
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
380
+ def do_lower_case(self):
381
+ return self.do_lowercase_and_remove_accent
382
+
383
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
384
+ def moses_punct_norm(self, text, lang):
385
+ if lang not in self.cache_moses_punct_normalizer:
386
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
387
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
388
+ else:
389
+ punct_normalizer = self.cache_moses_punct_normalizer[lang]
390
+ return punct_normalizer.normalize(text)
391
+
392
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
393
+ def moses_tokenize(self, text, lang):
394
+ if lang not in self.cache_moses_tokenizer:
395
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
396
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
397
+ else:
398
+ moses_tokenizer = self.cache_moses_tokenizer[lang]
399
+ return moses_tokenizer.tokenize(text, return_str=False, escape=False)
400
+
401
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
402
+ def moses_pipeline(self, text, lang):
403
+ text = replace_unicode_punct(text)
404
+ text = self.moses_punct_norm(text, lang)
405
+ text = remove_non_printing_char(text)
406
+ return text
407
+
408
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
409
+ def ja_tokenize(self, text):
410
+ if self.ja_word_tokenizer is None:
411
+ try:
412
+ import Mykytea
413
+
414
+ self.ja_word_tokenizer = Mykytea.Mykytea(
415
+ f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
416
+ )
417
+ except (AttributeError, ImportError):
418
+ logger.error(
419
+ "Make sure you install KyTea (https://github.com/neubig/kytea) and its Python wrapper"
420
+ " (https://github.com/chezou/Mykytea-python) with the following steps"
421
+ )
422
+ logger.error("1. git clone [email protected]:neubig/kytea.git && cd kytea")
423
+ logger.error("2. autoreconf -i")
424
+ logger.error("3. ./configure --prefix=$HOME/local")
425
+ logger.error("4. make && make install")
426
+ logger.error("5. pip install kytea")
427
+ raise
428
+ return list(self.ja_word_tokenizer.getWS(text))
429
+
430
+ @property
431
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
432
+ def vocab_size(self):
433
+ return len(self.encoder)
434
+
435
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
436
+ def get_vocab(self):
437
+ return dict(self.encoder, **self.added_tokens_encoder)
438
+
439
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
440
+ def bpe(self, token):
441
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
442
+ if token in self.cache:
443
+ return self.cache[token]
444
+ pairs = get_pairs(word)
445
+
446
+ if not pairs:
447
+ return token + "</w>"
448
+
449
+ while True:
450
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
451
+ if bigram not in self.bpe_ranks:
452
+ break
453
+ first, second = bigram
454
+ new_word = []
455
+ i = 0
456
+ while i < len(word):
457
+ try:
458
+ j = word.index(first, i)
459
+ except ValueError:
460
+ new_word.extend(word[i:])
461
+ break
462
+ else:
463
+ new_word.extend(word[i:j])
464
+ i = j
465
+
466
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
467
+ new_word.append(first + second)
468
+ i += 2
469
+ else:
470
+ new_word.append(word[i])
471
+ i += 1
472
+ new_word = tuple(new_word)
473
+ word = new_word
474
+ if len(word) == 1:
475
+ break
476
+ else:
477
+ pairs = get_pairs(word)
478
+ word = " ".join(word)
479
+ if word == "\n </w>":
480
+ word = "\n</w>"
481
+ self.cache[token] = word
482
+ return word
483
+
484
+ def _tokenize(self, text):
485
+ pre_tokens = self.bert_pre_tokenizer.tokenize(text)
486
+
487
+ split_tokens = []
488
+ for token in pre_tokens:
489
+ if token:
490
+ split_tokens.extend(list(self.bpe(token).split(" ")))
491
+
492
+ return split_tokens
493
+
494
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
495
+ def _convert_token_to_id(self, token):
496
+ """Converts a token (str) in an id using the vocab."""
497
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
498
+
499
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
500
+ def _convert_id_to_token(self, index):
501
+ """Converts an index (integer) in a token (str) using the vocab."""
502
+ return self.decoder.get(index, self.unk_token)
503
+
504
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
505
+ def convert_tokens_to_string(self, tokens):
506
+ """Converts a sequence of tokens (string) in a single string."""
507
+ out_string = "".join(tokens).replace("</w>", " ").strip()
508
+ return out_string
509
+
510
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
511
+ def build_inputs_with_special_tokens(
512
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
513
+ ) -> List[int]:
514
+ """
515
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
516
+ adding special tokens. An XLM sequence has the following format:
517
+
518
+ - single sequence: `<s> X </s>`
519
+ - pair of sequences: `<s> A </s> B </s>`
520
+
521
+ Args:
522
+ token_ids_0 (`List[int]`):
523
+ List of IDs to which the special tokens will be added.
524
+ token_ids_1 (`List[int]`, *optional*):
525
+ Optional second list of IDs for sequence pairs.
526
+
527
+ Returns:
528
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
529
+
530
+ """
531
+ bos = [self.bos_token_id]
532
+ sep = [self.sep_token_id]
533
+
534
+ if token_ids_1 is None:
535
+ return bos + token_ids_0 + sep
536
+ return bos + token_ids_0 + sep + token_ids_1 + sep
537
+
538
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
539
+ def get_special_tokens_mask(
540
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
541
+ ) -> List[int]:
542
+ """
543
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
544
+ special tokens using the tokenizer `prepare_for_model` method.
545
+
546
+ Args:
547
+ token_ids_0 (`List[int]`):
548
+ List of IDs.
549
+ token_ids_1 (`List[int]`, *optional*):
550
+ Optional second list of IDs for sequence pairs.
551
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
552
+ Whether or not the token list is already formatted with special tokens for the model.
553
+
554
+ Returns:
555
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
556
+ """
557
+
558
+ if already_has_special_tokens:
559
+ return super().get_special_tokens_mask(
560
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
561
+ )
562
+
563
+ if token_ids_1 is not None:
564
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
565
+ return [1] + ([0] * len(token_ids_0)) + [1]
566
+
567
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
568
+ def create_token_type_ids_from_sequences(
569
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
570
+ ) -> List[int]:
571
+ """
572
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
573
+ pair mask has the following format:
574
+
575
+ ```
576
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
577
+ | first sequence | second sequence |
578
+ ```
579
+
580
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
581
+
582
+ Args:
583
+ token_ids_0 (`List[int]`):
584
+ List of IDs.
585
+ token_ids_1 (`List[int]`, *optional*):
586
+ Optional second list of IDs for sequence pairs.
587
+
588
+ Returns:
589
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
590
+ """
591
+ sep = [self.sep_token_id]
592
+ cls = [self.cls_token_id]
593
+ if token_ids_1 is None:
594
+ return len(cls + token_ids_0 + sep) * [0]
595
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
596
+
597
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
598
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
599
+ if not os.path.isdir(save_directory):
600
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
601
+ return
602
+ vocab_file = os.path.join(
603
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
604
+ )
605
+ merge_file = os.path.join(
606
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
607
+ )
608
+
609
+ with open(vocab_file, "w", encoding="utf-8") as f:
610
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
611
+
612
+ index = 0
613
+ with open(merge_file, "w", encoding="utf-8") as writer:
614
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
615
+ if index != token_index:
616
+ logger.warning(
617
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
618
+ " Please check that the tokenizer is not corrupted!"
619
+ )
620
+ index = token_index
621
+ writer.write(" ".join(bpe_tokens) + "\n")
622
+ index += 1
623
+
624
+ return vocab_file, merge_file
625
+
626
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
627
+ def __getstate__(self):
628
+ state = self.__dict__.copy()
629
+ state["sm"] = None
630
+ return state
631
+
632
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
633
+ def __setstate__(self, d):
634
+ self.__dict__ = d
635
+
636
+ try:
637
+ import sacremoses
638
+ except ImportError:
639
+ raise ImportError(
640
+ "You need to install sacremoses to use HerbertTokenizer. "
641
+ "See https://pypi.org/project/sacremoses/ for installation."
642
+ )
643
+
644
+ self.sm = sacremoses
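A short usage sketch for the slow tokenizer above (editor's addition; it assumes the public `allegro/herbert-base-cased` checkpoint):

```python
from transformers import HerbertTokenizer

tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")

# BasicTokenizer pre-tokenization, then BPE; special tokens follow <s> X </s>.
enc = tokenizer("Kto czyta, nie błądzi.")
print(enc["input_ids"])
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
```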
llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py ADDED
@@ -0,0 +1,158 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import List, Optional, Tuple
17
+
18
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
19
+ from ...utils import logging
20
+ from .tokenization_herbert import HerbertTokenizer
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+
28
+ class HerbertTokenizerFast(PreTrainedTokenizerFast):
29
+ """
30
+ Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
31
+
32
+ Peculiarities:
33
+
34
+ - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
35
+ a punctuation character will be treated separately.
36
+
37
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
38
+ superclass for more information regarding methods.
39
+
40
+ Args:
41
+ vocab_file (`str`):
42
+ Path to the vocabulary file.
43
+ merges_file (`str`):
44
+ Path to the merges file.
45
+ """
46
+
47
+ vocab_files_names = VOCAB_FILES_NAMES
48
+ slow_tokenizer_class = HerbertTokenizer
49
+
50
+ def __init__(
51
+ self,
52
+ vocab_file=None,
53
+ merges_file=None,
54
+ tokenizer_file=None,
55
+ cls_token="<s>",
56
+ unk_token="<unk>",
57
+ pad_token="<pad>",
58
+ mask_token="<mask>",
59
+ sep_token="</s>",
60
+ **kwargs,
61
+ ):
62
+ super().__init__(
63
+ vocab_file,
64
+ merges_file,
65
+ tokenizer_file=tokenizer_file,
66
+ cls_token=cls_token,
67
+ unk_token=unk_token,
68
+ pad_token=pad_token,
69
+ mask_token=mask_token,
70
+ sep_token=sep_token,
71
+ **kwargs,
72
+ )
73
+
74
+ def build_inputs_with_special_tokens(
75
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
76
+ ) -> List[int]:
77
+ """
78
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
79
+ adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
80
+
81
+ - single sequence: `<s> X </s>`
82
+ - pair of sequences: `<s> A </s> B </s>`
83
+
84
+ Args:
85
+ token_ids_0 (`List[int]`):
86
+ List of IDs to which the special tokens will be added.
87
+ token_ids_1 (`List[int]`, *optional*):
88
+ Optional second list of IDs for sequence pairs.
89
+
90
+ Returns:
91
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
92
+ """
93
+
94
+ cls = [self.cls_token_id]
95
+ sep = [self.sep_token_id]
96
+ if token_ids_1 is None:
97
+ return cls + token_ids_0 + sep
98
+
99
+ return cls + token_ids_0 + sep + token_ids_1 + sep
100
+
101
+ def get_special_tokens_mask(
102
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
103
+ ) -> List[int]:
104
+ """
105
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
106
+ special tokens using the tokenizer `prepare_for_model` method.
107
+
108
+ Args:
109
+ token_ids_0 (`List[int]`):
110
+ List of IDs.
111
+ token_ids_1 (`List[int]`, *optional*):
112
+ Optional second list of IDs for sequence pairs.
113
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
114
+ Whether or not the token list is already formatted with special tokens for the model.
115
+
116
+ Returns:
117
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
118
+ """
119
+ if already_has_special_tokens:
120
+ return super().get_special_tokens_mask(
121
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
122
+ )
123
+
124
+ if token_ids_1 is None:
125
+ return [1] + ([0] * len(token_ids_0)) + [1]
126
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
127
+
128
+ def create_token_type_ids_from_sequences(
129
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
130
+ ) -> List[int]:
131
+ """
132
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. HerBERT, like
133
+ BERT sequence pair mask has the following format:
134
+
135
+ ```
136
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
137
+ | first sequence | second sequence |
138
+ ```
139
+
140
+ Args:
141
+ token_ids_0 (`List[int]`):
142
+ List of IDs.
143
+ token_ids_1 (`List[int]`, *optional*):
144
+ Optional second list of IDs for sequence pairs.
145
+
146
+ Returns:
147
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
148
+ """
149
+ sep = [self.sep_token_id]
150
+ cls = [self.cls_token_id]
151
+
152
+ if token_ids_1 is None:
153
+ return len(cls + token_ids_0 + sep) * [0]
154
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
155
+
156
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
157
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
158
+ return tuple(files)
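For the fast variant, the special-token helpers defined above can be exercised directly (editor's addition; same `allegro/herbert-base-cased` assumption as before):

```python
from transformers import HerbertTokenizerFast

tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Zdanie pierwsze."))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Zdanie drugie."))

# <s> A </s> B </s>, as documented in build_inputs_with_special_tokens above.
pair_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)  # 1 marks <s> / </s> positions
print(pair_ids)
print(mask)
```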
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc ADDED
Binary file (7.16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/configuration_idefics.py ADDED
@@ -0,0 +1,327 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ Idefics model configuration"""
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class IdeficsVisionConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
35
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
36
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
37
+
38
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ hidden_size (`int`, *optional*, defaults to 768):
45
+ Dimensionality of the encoder layers and the pooler layer (stored internally as `embed_dim`).
46
+ image_size (`int`, *optional*, defaults to 224):
47
+ The size (resolution) of each image.
48
+ intermediate_size (`int`, *optional*, defaults to 5120):
49
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ patch_size (`int`, *optional*, defaults to 14):
51
+ The size (resolution) of each patch.
52
+ num_hidden_layers (`int`, *optional*, defaults to 32):
53
+ Number of hidden layers in the Transformer encoder.
54
+ num_attention_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ image_num_channels (`int`, *optional*, defaults to `3`):
57
+ Number of image channels.
58
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
59
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
60
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
61
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
62
+ The epsilon used by the layer normalization layers.
63
+ attention_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the attention probabilities.
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ initializer_factor (`float`, *optional*, defaults to 1.0):
68
+ A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
69
+ testing).
70
+ initializer_range (`float`, *optional*, defaults to 0.02):
71
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72
+ """
73
+
74
+ model_type = "idefics"
75
+ attribute_map = {
76
+ "hidden_size": "embed_dim",
77
+ }
78
+
79
+ def __init__(
80
+ self,
81
+ embed_dim=768,
82
+ image_size=224,
83
+ intermediate_size=5120,
84
+ patch_size=14,
85
+ num_hidden_layers=32,
86
+ num_attention_heads=16,
87
+ num_channels=3,
88
+ hidden_act="gelu",
89
+ layer_norm_eps=1e-5,
90
+ attention_dropout=0.0,
91
+ initializer_range=0.02,
92
+ initializer_factor=1.0,
93
+ **kwargs,
94
+ ):
95
+ self.embed_dim = embed_dim
96
+ self.image_size = image_size
97
+ self.intermediate_size = intermediate_size
98
+ self.patch_size = patch_size
99
+ self.num_hidden_layers = num_hidden_layers
100
+ self.num_attention_heads = num_attention_heads
101
+ self.num_channels = num_channels
102
+ self.layer_norm_eps = layer_norm_eps
103
+ self.attention_dropout = attention_dropout
104
+ self.initializer_range = initializer_range
105
+ self.initializer_factor = initializer_factor
106
+ self.hidden_act = hidden_act
107
+
108
+ super().__init__(**kwargs)
109
+
110
+
111
+ class IdeficsPerceiverConfig(PretrainedConfig):
112
+ r"""
113
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
114
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
115
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
116
+
117
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
118
+
119
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
120
+ documentation from [`PretrainedConfig`] for more information.
121
+
122
+ Args:
123
+ use_resampler (`bool`, *optional*, defaults to `False`):
124
+ Whether or not to use the resampler
125
+ resampler_n_latents (`int`, *optional*, defaults to 64):
126
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
127
+ resampler_depth (`int`, *optional*, defaults to 6):
128
+ Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
129
+ resampler_n_heads (`int`, *optional*, defaults to 16):
130
+ Number of heads in each Transformer block (for multi-headed self-attention).
131
+ resampler_head_dim (`int`, *optional*, defaults to 96):
132
+ Dimensionality of each head projection in the Transformer block.
133
+ qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
134
+ Whether or not to use qk layer norms in perceiver
135
+ """
136
+
137
+ model_type = "idefics"
138
+
139
+ def __init__(
140
+ self,
141
+ use_resampler=False,
142
+ resampler_n_latents=64,
143
+ resampler_depth=6,
144
+ resampler_n_heads=16,
145
+ resampler_head_dim=96,
146
+ qk_layer_norms_perceiver=False,
147
+ **kwargs,
148
+ ):
149
+ self.use_resampler = use_resampler
150
+ self.resampler_n_latents = resampler_n_latents
151
+ self.resampler_depth = resampler_depth
152
+ self.resampler_n_heads = resampler_n_heads
153
+ self.resampler_head_dim = resampler_head_dim
154
+ self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
155
+
156
+ super().__init__(**kwargs)
157
+
158
+
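# Editor's note (illustrative sketch, not part of the upstream file): the vision and
# perceiver sub-configs above are normally nested inside the composite `IdeficsConfig`
# defined below, either as config objects or as plain dicts, e.g.
#
#     vision = IdeficsVisionConfig(embed_dim=768, image_size=224)
#     perceiver = IdeficsPerceiverConfig(use_resampler=True, resampler_n_latents=64)
#     config = IdeficsConfig(vision_config=vision, perceiver_config=perceiver)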
159
+ class IdeficsConfig(PretrainedConfig):
160
+ r"""
161
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
162
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
163
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
164
+
165
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
166
+
167
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
168
+ documentation from [`PretrainedConfig`] for more information.
169
+
170
+ Args:
171
+ additional_vocab_size (`int`, *optional*, defaults to 0):
172
+ Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
173
+ are always trainable whereas regular vocab tokens can be frozen or not.
174
+ vocab_size (`int`, *optional*, defaults to 32000):
175
+ Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
176
+ `input_ids` passed when calling [`~IdeficsModel`]
177
+ hidden_size (`int`, *optional*, defaults to 4096):
178
+ Dimension of the hidden representations.
179
+ intermediate_size (`int`, *optional*, defaults to 11008):
180
+ Dimension of the MLP representations.
181
+ num_hidden_layers (`int`, *optional*, defaults to 32):
182
+ Number of hidden layers in the Transformer encoder.
183
+ num_attention_heads (`int`, *optional*, defaults to 32):
184
+ Number of attention heads for each attention layer in the Transformer encoder.
185
+ dropout (`float`, *optional*, defaults to 0.0):
186
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
187
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
188
+ The non-linear activation function (function or string) in the decoder.
189
+ initializer_range (`float`, *optional*, defaults to 0.02):
190
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
191
+ alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
192
+ Initialization type for the alphas.
193
+ alphas_initializer_range (`float`, *optional*, defaults to 0.0):
194
+ The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
195
+ Attention.
196
+ alpha_type (`str`, *optional*, defaults to `"float"`):
197
+ Whether the gating alphas should be vectors or single floats.
198
+ rms_norm_eps (`float`, *optional*, defaults to 1e-6):
199
+ The epsilon used by the rms normalization layers.
200
+ use_cache (`bool`, *optional*, defaults to `True`):
201
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
202
+ relevant if `config.is_decoder=True`.
203
+ pad_token_id (`int`, *optional*, defaults to 0)
204
+ Padding token id.
205
+ bos_token_id (`int`, *optional*, defaults to 1)
206
+ Beginning of stream token id.
207
+ eos_token_id (`int`, *optional*, defaults to 2)
208
+ End of stream token id.
209
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
210
+ Whether to tie weight embeddings
211
+ cross_layer_interval (`int`, *optional*, defaults to 1):
212
+ Interval for cross attention (from text to image) layers.
213
+ qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
214
+ freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
215
+ freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
216
+ Exceptions to freezing text layers when `freeze_text_layers` is `True`
217
+ freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
218
+ freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
219
+ freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
220
+ Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
221
+ use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
222
+ vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
223
+ perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
224
+
225
+ Example:
226
+
227
+ ```python
228
+ >>> from transformers import IdeficsModel, IdeficsConfig
229
+
230
+ >>> # Initializing a Idefics idefics-9b style configuration
231
+ >>> configuration = IdeficsConfig()
232
+
233
+ >>> # Initializing a model from the idefics-9b style configuration
234
+ >>> model = IdeficsModel(configuration)
235
+
236
+ >>> # Accessing the model configuration
237
+ >>> configuration = model.config
238
+ ```"""
239
+
240
+ model_type = "idefics"
241
+ is_composition = False
242
+
243
+ def __init__(
244
+ self,
245
+ vocab_size=32000,
246
+ additional_vocab_size=0,
247
+ hidden_size=4096,
248
+ intermediate_size=11008,
249
+ num_hidden_layers=32,
250
+ num_attention_heads=32,
251
+ dropout=0.0,
252
+ hidden_act="silu",
253
+ initializer_range=0.02,
254
+ alpha_initializer="zeros",
255
+ alphas_initializer_range=0.0,
256
+ alpha_type="float",
257
+ rms_norm_eps=1e-6,
258
+ use_cache=True,
259
+ pad_token_id=0,
260
+ bos_token_id=1,
261
+ eos_token_id=2,
262
+ tie_word_embeddings=False,
263
+ cross_layer_interval=1,
264
+ qk_layer_norms=False,
265
+ freeze_text_layers=True,
266
+ freeze_text_module_exceptions=[],
267
+ freeze_lm_head=False,
268
+ freeze_vision_layers=True,
269
+ freeze_vision_module_exceptions=[],
270
+ use_resampler=False,
271
+ vision_config=None,
272
+ perceiver_config=None,
273
+ **kwargs,
274
+ ):
275
+ self.vocab_size = vocab_size
276
+ self.additional_vocab_size = additional_vocab_size
277
+ self.hidden_size = hidden_size
278
+ self.intermediate_size = intermediate_size
279
+ self.num_hidden_layers = num_hidden_layers
280
+ self.num_attention_heads = num_attention_heads
281
+ self.dropout = dropout
282
+ self.hidden_act = hidden_act
283
+ self.initializer_range = initializer_range
284
+ self.alpha_initializer = alpha_initializer
285
+ self.alphas_initializer_range = alphas_initializer_range
286
+ self.alpha_type = alpha_type
287
+ self.rms_norm_eps = rms_norm_eps
288
+ self.use_cache = use_cache
289
+
290
+ self.cross_layer_interval = cross_layer_interval
291
+ self.qk_layer_norms = qk_layer_norms
292
+ self.freeze_vision_layers = freeze_vision_layers
293
+
294
+ self.freeze_text_layers = freeze_text_layers
295
+ self.freeze_text_module_exceptions = freeze_text_module_exceptions
296
+ self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
297
+ self.freeze_lm_head = freeze_lm_head
298
+
299
+ self.use_resampler = use_resampler
300
+
301
+ if perceiver_config is None:
302
+ self.perceiver_config = IdeficsPerceiverConfig()
303
+ elif isinstance(perceiver_config, dict):
304
+ self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
305
+ elif isinstance(perceiver_config, IdeficsPerceiverConfig):
306
+ self.perceiver_config = perceiver_config
307
+
308
+ if vision_config is None:
309
+ self.vision_config = IdeficsVisionConfig()
310
+ elif isinstance(vision_config, dict):
311
+ self.vision_config = IdeficsVisionConfig(**vision_config)
312
+ elif isinstance(vision_config, IdeficsVisionConfig):
313
+ self.vision_config = vision_config
314
+
315
+ super().__init__(
316
+ pad_token_id=pad_token_id,
317
+ bos_token_id=bos_token_id,
318
+ eos_token_id=eos_token_id,
319
+ tie_word_embeddings=tie_word_embeddings,
320
+ **kwargs,
321
+ )
322
+
323
+ # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
324
+ # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
325
+ # updates the config object with `kwargs` from from_pretrained, so during the instantiation
326
+ # of this object many attributes have default values and haven't yet been overridden.
327
+ # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
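As the constructor above shows, the nested `perceiver_config` and `vision_config` may be passed as plain dicts and are normalized into config objects. A minimal sketch, assuming `IdeficsConfig` is importable from the top-level `transformers` package:

```python
from transformers import IdeficsConfig

# Nested sub-configs can be given as dicts; the constructor converts them
# into IdeficsPerceiverConfig / IdeficsVisionConfig instances.
config = IdeficsConfig(
    use_resampler=True,
    perceiver_config={"resampler_n_latents": 64, "resampler_depth": 6},
)
print(type(config.perceiver_config).__name__)   # IdeficsPerceiverConfig
print(config.perceiver_config.resampler_n_latents)  # 64
```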
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/image_processing_idefics.py ADDED
@@ -0,0 +1,168 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Idefics."""
16
+
17
+ from typing import Callable, Dict, List, Optional, Union
18
+
19
+ from PIL import Image
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
22
+ from ...image_transforms import resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ ChannelDimension,
25
+ ImageInput,
26
+ PILImageResampling,
27
+ make_list_of_images,
28
+ to_numpy_array,
29
+ valid_images,
30
+ )
31
+ from ...utils import TensorType, is_torch_available
32
+
33
+
34
+ IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073]
35
+ IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711]
36
+
37
+
38
+ def convert_to_rgb(image):
39
+ # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
40
+ # for transparent images. The call to `alpha_composite` handles this case
41
+ if image.mode == "RGB":
42
+ return image
43
+
44
+ image_rgba = image.convert("RGBA")
45
+ background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
46
+ alpha_composite = Image.alpha_composite(background, image_rgba)
47
+ alpha_composite = alpha_composite.convert("RGB")
48
+ return alpha_composite
49
+
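A quick sketch of the compositing behaviour described in the comment above, assuming `convert_to_rgb` is in scope (it is module-level in this file): a fully transparent pixel ends up white rather than keeping its hidden colour.

```python
from PIL import Image

rgba = Image.new("RGBA", (2, 2), (255, 0, 0, 0))  # fully transparent "red"
rgb = convert_to_rgb(rgba)
print(rgb.mode, rgb.getpixel((0, 0)))  # RGB (255, 255, 255)
```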
50
+
51
+ class IdeficsImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs an Idefics image processor.
54
+
55
+ Args:
56
+ image_size (`int`, *optional*, defaults to 224):
57
+ Resize to image size
58
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
59
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
60
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess`
61
+ method.
62
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
63
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
64
+ number of channels in the image. Can be overridden by the `image_std` parameter in the
65
+ `preprocess` method.
66
+ image_num_channels (`int`, *optional*, defaults to 3):
67
+ Number of image channels.
68
+ """
69
+
70
+ model_input_names = ["pixel_values"]
71
+
72
+ def __init__(
73
+ self,
74
+ image_size: int = 224,
75
+ image_mean: Optional[Union[float, List[float]]] = None,
76
+ image_std: Optional[Union[float, List[float]]] = None,
77
+ image_num_channels: Optional[int] = 3,
78
+ **kwargs,
79
+ ) -> None:
80
+ super().__init__(**kwargs)
81
+
82
+ self.image_size = image_size
83
+ self.image_num_channels = image_num_channels
84
+ self.image_mean = image_mean
85
+ self.image_std = image_std
86
+
87
+ def preprocess(
88
+ self,
89
+ images: ImageInput,
90
+ image_num_channels: Optional[int] = 3,
91
+ image_size: Optional[Dict[str, int]] = None,
92
+ image_mean: Optional[Union[float, List[float]]] = None,
93
+ image_std: Optional[Union[float, List[float]]] = None,
94
+ transform: Callable = None,
95
+ **kwargs,
96
+ ) -> TensorType.PYTORCH:
97
+ """
98
+ Preprocess a batch of images.
99
+
100
+ Args:
101
+ images (`ImageInput`):
102
+ A list of images to preprocess.
103
+ image_size (`int`, *optional*, defaults to `self.image_size`):
104
+ Resize to image size
105
+ image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
106
+ Number of image channels.
107
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
108
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
109
+ channels in the image. Can be overridden by the `image_mean` parameter in the
110
+ `preprocess` method.
111
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
112
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
113
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
114
+ method.
115
+ transform (`Callable`, *optional*, defaults to `None`):
116
+ A custom transform function that accepts a single image can be passed for training. For example,
117
+ `torchvision.Compose` can be used to compose multiple transforms. If `None`, inference mode is
118
+ assumed and a preset of inference-specific transforms is applied to the images.
119
+
120
+ Returns:
121
+ a PyTorch tensor of the processed images
122
+
123
+ """
124
+ image_size = image_size if image_size is not None else self.image_size
125
+ image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
126
+ image_mean = image_mean if image_mean is not None else self.image_mean
127
+ image_std = image_std if image_std is not None else self.image_std
128
+ size = (image_size, image_size)
129
+
130
+ if isinstance(images, list) and len(images) == 0:
131
+ return []
132
+
133
+ images = make_list_of_images(images)
134
+
135
+ if not valid_images(images):
136
+ raise ValueError(
137
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
138
+ "torch.Tensor, tf.Tensor or jax.ndarray."
139
+ )
140
+
141
+ # For training a user needs to pass their own set of transforms as a Callable.
142
+ # For reference this is what was used in the original IDEFICS training:
143
+ # transform = transforms.Compose([
144
+ # convert_to_rgb,
145
+ # transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
146
+ # transforms.ToTensor(),
147
+ # transforms.Normalize(mean=image_mean, std=image_std),
148
+ # ])
149
+ if transform is not None:
150
+ if not is_torch_available():
151
+ raise ImportError("To pass in `transform` torch must be installed")
152
+ import torch
153
+
154
+ images = [transform(x) for x in images]
155
+ return torch.stack(images)
156
+
157
+ # for inference we do the exact transforms that were used to train IDEFICS
158
+ images = [convert_to_rgb(x) for x in images]
159
+ # further transforms expect numpy arrays
160
+ images = [to_numpy_array(x) for x in images]
161
+ images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
162
+ images = [self.rescale(image=image, scale=1 / 255) for image in images]
163
+ images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
164
+ images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
165
+ # TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available
166
+ images = BatchFeature(data={"pixel_values": images}, tensor_type=TensorType.PYTORCH)["pixel_values"]
167
+
168
+ return images
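End-to-end, the inference path above resizes, rescales, normalizes and stacks the images into a channels-first tensor. A minimal usage sketch, assuming the class and the mean/std constants are imported from this module:

```python
from PIL import Image
from transformers.models.idefics.image_processing_idefics import (
    IDEFICS_STANDARD_MEAN,
    IDEFICS_STANDARD_STD,
    IdeficsImageProcessor,
)

processor = IdeficsImageProcessor(
    image_size=224, image_mean=IDEFICS_STANDARD_MEAN, image_std=IDEFICS_STANDARD_STD
)
images = [Image.new("RGB", (640, 480), "white"), Image.new("RGB", (320, 200), "black")]
pixel_values = processor.preprocess(images)
print(tuple(pixel_values.shape))  # (2, 3, 224, 224)
```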
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py ADDED
@@ -0,0 +1,1588 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch Idefics model."""
21
+ from dataclasses import dataclass
22
+ from typing import Any, Dict, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import CrossEntropyLoss
29
+
30
+ from ... import PreTrainedModel
31
+ from ...activations import ACT2FN
32
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask_for_sdpa
33
+ from ...modeling_outputs import ModelOutput
34
+ from ...modeling_utils import PretrainedConfig
35
+ from ...pytorch_utils import ALL_LAYERNORM_LAYERS
36
+ from ...utils import (
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from .configuration_idefics import IdeficsConfig
43
+ from .perceiver import IdeficsPerceiverResampler
44
+ from .vision import IdeficsVisionTransformer
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CONFIG_FOR_DOC = "IdeficsConfig"
50
+
51
+
52
+ from ..deprecated._archive_maps import IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ @dataclass
56
+ class IdeficsBaseModelOutputWithPast(ModelOutput):
57
+ """
58
+ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).
59
+
60
+ Args:
61
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
62
+ Sequence of hidden-states at the output of the last layer of the model.
63
+
64
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
65
+ hidden_size)` is output.
66
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
67
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
68
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
69
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
70
+ encoder_sequence_length, embed_size_per_head)`.
71
+
72
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
73
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
74
+ input) to speed up sequential decoding.
75
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
76
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
77
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
78
+
79
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
80
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
81
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
82
+ sequence_length)`.
83
+
84
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
85
+ heads.
86
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
87
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
88
+ sequence_length, hidden_size)`.
89
+
90
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
91
+ """
92
+
93
+ last_hidden_state: torch.FloatTensor = None
94
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
95
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
96
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
97
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
98
+
99
+
100
+ @dataclass
101
+ class IdeficsCausalLMOutputWithPast(ModelOutput):
102
+ """
103
+ Base class for Idefics causal language model (or autoregressive) outputs.
104
+
105
+ Args:
106
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
107
+ Language modeling loss (for next-token prediction).
108
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
109
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
110
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
111
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
112
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
113
+
114
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
115
+ `past_key_values` input) to speed up sequential decoding.
116
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
117
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
118
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
119
+
120
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
121
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
122
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
123
+ sequence_length)`.
124
+
125
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
126
+ heads.
127
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
128
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
129
+ sequence_length, hidden_size)`.
130
+
131
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
132
+ """
133
+
134
+ loss: Optional[torch.FloatTensor] = None
135
+ logits: torch.FloatTensor = None
136
+ past_key_values: Optional[List[torch.FloatTensor]] = None
137
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
138
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
139
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
140
+
141
+
142
+ def expand_inputs_for_generation(
143
+ input_ids,
144
+ expand_size=1,
145
+ is_encoder_decoder=False,
146
+ attention_mask=None,
147
+ encoder_outputs=None,
148
+ **model_kwargs,
149
+ ):
150
+ expanded_return_idx = (
151
+ torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
152
+ )
153
+ input_ids = input_ids.index_select(0, expanded_return_idx)
154
+ model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
155
+ model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None)
156
+ model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None)
157
+ model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None)
158
+
159
+ if "token_type_ids" in model_kwargs:
160
+ token_type_ids = model_kwargs["token_type_ids"]
161
+ model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)
162
+
163
+ if attention_mask is not None:
164
+ model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
165
+
166
+ if model_kwargs["image_attention_mask"] is not None:
167
+ model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select(
168
+ 0, expanded_return_idx
169
+ )
170
+
171
+ if model_kwargs["pixel_values"] is not None:
172
+ model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)
173
+
174
+ elif model_kwargs["image_encoder_embeddings"] is not None:
175
+ model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select(
176
+ 0, expanded_return_idx
177
+ )
178
+
179
+ elif model_kwargs["perceiver_embeddings"] is not None:
180
+ model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select(
181
+ 0, expanded_return_idx
182
+ )
183
+
184
+ return input_ids, model_kwargs
185
+
186
+
187
+ def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):
188
+ token_type_ids = kwargs.get("token_type_ids", None)
189
+ # only last token for inputs_ids if past is defined in kwargs
190
+ if past_key_values:
191
+ input_ids = input_ids[:, -1].unsqueeze(-1)
192
+ if token_type_ids is not None:
193
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
194
+
195
+ attention_mask = kwargs.get("attention_mask", None)
196
+ position_ids = kwargs.get("position_ids", None)
197
+
198
+ if attention_mask is not None and position_ids is None:
199
+ # create position_ids on the fly for batch generation
200
+ position_ids = attention_mask.long().cumsum(-1) - 1
201
+ position_ids.masked_fill_(attention_mask == 0, 1)
202
+ if past_key_values:
203
+ position_ids = position_ids[:, -1].unsqueeze(-1)
204
+
205
+ pixel_values = kwargs.get("pixel_values", None)
206
+ image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None)
207
+ perceiver_embeddings = kwargs.get("perceiver_embeddings", None)
208
+ image_attention_mask = kwargs.get("image_attention_mask", None)
209
+ interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False)
210
+
211
+ return {
212
+ "input_ids": input_ids,
213
+ "past_key_values": past_key_values,
214
+ "use_cache": kwargs.get("use_cache"),
215
+ "position_ids": position_ids,
216
+ "attention_mask": attention_mask,
217
+ "token_type_ids": token_type_ids,
218
+ "pixel_values": pixel_values,
219
+ "image_encoder_embeddings": image_encoder_embeddings,
220
+ "perceiver_embeddings": perceiver_embeddings,
221
+ "image_attention_mask": image_attention_mask,
222
+ "interpolate_pos_encoding": interpolate_pos_encoding,
223
+ }
224
+
225
+
226
+ def freeze_model(model, module_exceptions=[]):
227
+ mapping = {
228
+ "LayerNorm": nn.LayerNorm,
229
+ "Linear": nn.Linear,
230
+ "Embedding": nn.Embedding,
231
+ }
232
+ module_exceptions_mapped = [mapping[m] for m in module_exceptions]
233
+ for module in model.modules():
234
+ if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
235
+ module.requires_grad_(True) # Explicitly setting it to True to avoid any mistakes
236
+ else:
237
+ module.requires_grad_(False)
238
+ return model
239
+
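`freeze_model` above freezes every module except those whose class name appears in `module_exceptions` (the same mechanism backing `freeze_text_module_exceptions` / `freeze_vision_module_exceptions` in the config). A small sketch, assuming the function is in scope:

```python
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
freeze_model(model, module_exceptions=["LayerNorm"])
print(all(not p.requires_grad for p in model[0].parameters()))  # True  (Linear frozen)
print(all(p.requires_grad for p in model[1].parameters()))      # True  (LayerNorm trainable)
```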
240
+
241
+ class IdeficsDecoupledEmbedding(nn.Embedding):
242
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding
243
+ """
244
+ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practise, the
245
+ regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
246
+ then it will create `num_additional_embeddings` additional parameters that are always trained. If
247
+ `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
248
+ """
249
+
250
+ def __init__(
251
+ self,
252
+ num_embeddings,
253
+ num_additional_embeddings,
254
+ embedding_dim,
255
+ partially_freeze: Optional[bool] = False,
256
+ device=None,
257
+ dtype=None,
258
+ padding_idx=None,
259
+ **kwargs,
260
+ ) -> None:
261
+ """
262
+ Args:
263
+ num_embeddings (`int`):
264
+ Size of the dictionary of embeddings
265
+ num_additional_embeddings (`int`):
266
+ Number of additional embeddings. Only useful when `partially_freeze=True`.
267
+ embedding_dim (`int`):
268
+ The size of each embedding vector
269
+ partially_freeze: (`bool`, *optional*, defaults to `False`):
270
+ If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
271
+ padding_idx (`int`, *optional*):
272
+ The padding index (needs to be less than num_embeddings)
273
+
274
+ Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `padding_idx`,
275
+ `max_norm` or `norm_type`. We are not supporting these.
276
+ """
277
+ if padding_idx is not None and padding_idx > num_embeddings:
278
+ raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}")
279
+ super().__init__(
280
+ num_embeddings=num_embeddings,
281
+ embedding_dim=embedding_dim,
282
+ device=device,
283
+ dtype=dtype,
284
+ padding_idx=padding_idx,
285
+ **kwargs,
286
+ )
287
+ self.num_embeddings = num_embeddings
288
+ self.padding_idx = padding_idx
289
+ self.num_additional_embeddings = num_additional_embeddings
290
+ self.partially_freeze = partially_freeze
291
+
292
+ if partially_freeze:
293
+ self.weight.requires_grad_(False)
294
+
295
+ if self.num_additional_embeddings > 0:
296
+ self.additional_embedding = nn.Embedding(
297
+ num_embeddings=self.num_additional_embeddings,
298
+ embedding_dim=embedding_dim,
299
+ device=device,
300
+ dtype=dtype,
301
+ )
302
+
303
+ def forward(self, input_ids):
304
+ """
305
+ we have 2 embeddings, with different indices - one pretrained self.weight and another
306
+ self.additional_embedding.weight that is being trained.
307
+
308
+ in order to make a lookup of the input ids, we:
309
+ 1. find out the indices of the entries belonging to the 2nd embedding
310
+ 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
311
+ embedding starts from 0 and not num_embeddings
312
+ 3. perform the 2nd embedding lookup
313
+ 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
314
+ 5. perform the 1st embedding lookup
315
+ 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup
316
+
317
+ note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but
318
+ then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices -
319
+ i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are
320
+ usually relatively short it's probably not faster or if faster not by much - but might be a good idea to
321
+ measure.
322
+
323
+ """
324
+ if self.num_additional_embeddings == 0:
325
+ return F.embedding(input_ids, self.weight)
326
+
327
+ # Clone so that we don't modify the original input_ids later on
328
+ input_ids = input_ids.clone()
329
+ additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
330
+ input_ids_additional_vocab = input_ids[additional_vocab_indices]
331
+ additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)
332
+
333
+ # for successful lookup replace input_ids with 0, the results of these will be discarded anyway
334
+ input_ids[additional_vocab_indices] = 0
335
+ full_vector = F.embedding(input_ids, self.weight)
336
+
337
+ # overwrite the records with high indices
338
+ full_vector[additional_vocab_indices] = additional_embeddings
339
+
340
+ return full_vector
341
+
342
+ def extra_repr(self) -> str:
343
+ return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format(
344
+ self.num_embeddings,
345
+ self.num_additional_embeddings,
346
+ self.embedding_dim,
347
+ self.partially_freeze,
348
+ )
349
+
350
+
351
+ class IdeficsDecoupledLinear(nn.Linear):
352
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
353
+ """
354
+ Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practise, the
355
+ regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
356
+ then it will create `out_additional_features * in_features` additional parameters that are always trained. If
357
+ `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
358
+ """
359
+
360
+ def __init__(
361
+ self,
362
+ in_features: int,
363
+ out_features: int,
364
+ out_additional_features: int = 0,
365
+ bias: bool = True,
366
+ partially_freeze: bool = True,
367
+ device=None,
368
+ dtype=None,
369
+ ) -> None:
370
+ """
371
+ out_additional_features: int. Number of additional trainable dimensions. Only makes sense when
372
+ `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra
373
+ parameters (if any) will be trainable. If False, default to the regular behavior of nn.Linear.
374
+ """
375
+ super().__init__(in_features, out_features, bias, device, dtype)
376
+ self.out_additional_features = out_additional_features
377
+ self.partially_freeze = partially_freeze
378
+
379
+ self.in_features = in_features
380
+ self.out_features = out_features
381
+
382
+ if partially_freeze:
383
+ self.weight.requires_grad_(False)
384
+ if bias:
385
+ self.bias.requires_grad_(False)
386
+
387
+ if out_additional_features > 0:
388
+ self.additional_fc = nn.Linear(
389
+ in_features=in_features,
390
+ out_features=out_additional_features,
391
+ bias=bias,
392
+ device=device,
393
+ dtype=dtype,
394
+ )
395
+
396
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
397
+ output = F.linear(input, self.weight, self.bias)
398
+
399
+ if self.out_additional_features > 0:
400
+ additional_features = self.additional_fc(input)
401
+ output = torch.cat((output, additional_features), -1)
402
+
403
+ return output
404
+
405
+ def extra_repr(self) -> str:
406
+ """Overwriting `nn.Linear.extra_repr` to include new parameters."""
407
+ return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
408
+ self.in_features,
409
+ self.out_features,
410
+ self.out_additional_features,
411
+ self.bias is not None,
412
+ self.partially_freeze,
413
+ )
414
+
415
+
416
+ # this was adapted from LlamaRMSNorm
417
+ class IdeficsRMSNorm(nn.Module):
418
+ def __init__(self, hidden_size, eps=1e-6):
419
+ """
420
+ IdeficsRMSNorm is equivalent to T5LayerNorm
421
+ """
422
+ super().__init__()
423
+ self.weight = nn.Parameter(torch.ones(hidden_size))
424
+ self.variance_epsilon = eps
425
+
426
+ def forward(self, hidden_states):
427
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
428
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
429
+
430
+ # convert into half-precision if necessary
431
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
432
+ hidden_states = hidden_states.to(self.weight.dtype)
433
+
434
+ return self.weight * hidden_states
435
+
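In other words, the layer computes `x * rsqrt(mean(x**2) + eps)` scaled by a learned weight, with no mean subtraction or bias as in a standard LayerNorm. A quick numeric check, assuming the class is in scope:

```python
import torch

norm = IdeficsRMSNorm(hidden_size=4, eps=1e-6)  # weight initialized to ones
x = torch.randn(2, 3, 4)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
print(torch.allclose(norm(x), expected))  # True
```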
436
+
437
+ ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm)
438
+
439
+
440
+ # this was adapted from LlamaRotaryEmbedding
441
+ class IdeficsEmbedding(torch.nn.Module):
442
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
443
+ super().__init__()
444
+
445
+ self.dim = dim
446
+ self.max_position_embeddings = max_position_embeddings
447
+ self.base = base
448
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
449
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
450
+
451
+ # Build here to make `torch.jit.trace` work.
452
+ self._set_cos_sin_cache(
453
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
454
+ )
455
+
456
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
457
+ self.max_seq_len_cached = seq_len
458
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
459
+
460
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
461
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
462
+ emb = torch.cat((freqs, freqs), dim=-1)
463
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
464
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
465
+
466
+ def forward(self, x, seq_len=None):
467
+ # x: [bs, num_attention_heads, seq_len, head_size]
468
+ if seq_len > self.max_seq_len_cached:
469
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
470
+
471
+ return (
472
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
473
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
474
+ )
475
+
476
+
477
+ def rotate_half(x):
478
+ """Rotates half the hidden dims of the input."""
479
+ x1 = x[..., : x.shape[-1] // 2]
480
+ x2 = x[..., x.shape[-1] // 2 :]
481
+ return torch.cat((-x2, x1), dim=-1)
482
+
483
+
484
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
485
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
486
+ """Applies Rotary Position Embedding to the query and key tensors.
487
+
488
+ Args:
489
+ q (`torch.Tensor`): The query tensor.
490
+ k (`torch.Tensor`): The key tensor.
491
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
492
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
493
+ position_ids (`torch.Tensor`):
494
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
495
+ used to pass offsetted position ids when working with a KV-cache.
496
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
497
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
498
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
499
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
500
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
501
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
502
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
503
+ Returns:
504
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
505
+ """
506
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
507
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
508
+ q_embed = (q * cos) + (rotate_half(q) * sin)
509
+ k_embed = (k * cos) + (rotate_half(k) * sin)
510
+ return q_embed, k_embed
511
+
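Putting the two pieces together: `IdeficsEmbedding` caches the `cos`/`sin` tables and `apply_rotary_pos_emb` rotates queries and keys in the `[batch, heads, seq_len, head_dim]` layout used by `IdeficsAttention` below. A shape-only sketch, assuming both are in scope:

```python
import torch

head_dim, seq_len = 8, 5
rope = IdeficsEmbedding(dim=head_dim)
q = torch.randn(1, 2, seq_len, head_dim)
k = torch.randn(1, 2, seq_len, head_dim)
cos, sin = rope(q, seq_len=seq_len)                # each: (seq_len, head_dim)
position_ids = torch.arange(seq_len).unsqueeze(0)  # (1, seq_len)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
print(q_rot.shape, k_rot.shape)  # torch.Size([1, 2, 5, 8]) torch.Size([1, 2, 5, 8])
```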
512
+
513
+ # this was adapted from LlamaMLP
514
+ class IdeficsMLP(nn.Module):
515
+ def __init__(
516
+ self,
517
+ hidden_size: int,
518
+ intermediate_size: int,
519
+ hidden_act: str,
520
+ ):
521
+ super().__init__()
522
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
523
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
524
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
525
+ self.act_fn = ACT2FN[hidden_act]
526
+
527
+ def forward(self, x):
528
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
529
+
530
+
531
+ # this was adapted from LlamaAttention
532
+ class IdeficsAttention(nn.Module):
533
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
534
+
535
+ def __init__(
536
+ self,
537
+ hidden_size: int,
538
+ num_heads: int,
539
+ dropout: float = 0.0,
540
+ is_cross_attention: bool = False,
541
+ config: PretrainedConfig = None,
542
+ qk_layer_norms: bool = False,
543
+ ):
544
+ super().__init__()
545
+ self.hidden_size = hidden_size
546
+ self.num_heads = num_heads
547
+ self.head_dim = hidden_size // num_heads
548
+ self.dropout = dropout
549
+ self.is_causal = True
550
+
551
+ if (self.head_dim * num_heads) != self.hidden_size:
552
+ raise ValueError(
553
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
554
+ f" and `num_heads`: {num_heads})."
555
+ )
556
+
557
+ self.is_cross_attention = is_cross_attention
558
+
559
+ if not hasattr(nn.functional, "scaled_dot_product_attention"):
560
+ raise ValueError("this model requires pytorch 2.0 or higher")
561
+
562
+ if self.is_cross_attention:
563
+ kv_input_dim = (
564
+ self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
565
+ )
566
+ self.q_proj = nn.Linear(
567
+ self.hidden_size,
568
+ num_heads * self.head_dim,
569
+ bias=False,
570
+ )
571
+ self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
572
+ self.v_proj = nn.Linear(
573
+ kv_input_dim,
574
+ num_heads * self.head_dim,
575
+ bias=False,
576
+ )
577
+ else:
578
+ self.q_proj = nn.Linear(
579
+ self.hidden_size,
580
+ num_heads * self.head_dim,
581
+ bias=False,
582
+ )
583
+ self.k_proj = nn.Linear(
584
+ self.hidden_size,
585
+ num_heads * self.head_dim,
586
+ bias=False,
587
+ )
588
+ self.v_proj = nn.Linear(
589
+ self.hidden_size,
590
+ num_heads * self.head_dim,
591
+ bias=False,
592
+ )
593
+ self.o_proj = nn.Linear(
594
+ num_heads * self.head_dim,
595
+ hidden_size,
596
+ bias=False,
597
+ )
598
+ self.rotary_emb = IdeficsEmbedding(self.head_dim)
599
+
600
+ self.qk_layer_norms = qk_layer_norms
601
+ if self.qk_layer_norms:
602
+ self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
603
+ self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
604
+
605
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
606
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
607
+
608
+ def forward(
609
+ self,
610
+ hidden_states: torch.Tensor,
611
+ key_value_states: Optional[torch.Tensor] = None,
612
+ attention_mask: Optional[torch.Tensor] = None,
613
+ position_ids: Optional[torch.LongTensor] = None,
614
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
615
+ output_attentions: bool = False,
616
+ use_cache: bool = False,
617
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
618
+ # if key_value_states are provided this layer is used as a cross-attention layer
619
+ is_cross_attention = self.is_cross_attention or key_value_states is not None
620
+
621
+ bsz, q_len, _ = hidden_states.size()
622
+
623
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
624
+ if not is_cross_attention:
625
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
626
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
627
+ else:
628
+ _, kv_len, _ = key_value_states.size() # Note that, in this case, `kv_len` == `kv_seq_len`
629
+ key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
630
+ value_states = (
631
+ self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
632
+ )
633
+
634
+ kv_seq_len = key_states.shape[-2]
635
+ if past_key_value is not None:
636
+ kv_seq_len += past_key_value[0].shape[-2]
637
+ if not is_cross_attention:
638
+ cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len))
639
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
640
+ # [bsz, nh, t, hd]
641
+
642
+ if past_key_value is not None:
643
+ # reuse k, v, self_attention
644
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
645
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
646
+
647
+ past_key_value = (key_states, value_states) if use_cache else None
648
+
649
+ if self.qk_layer_norms:
650
+ query_states = self.q_layer_norm(query_states)
651
+ key_states = self.k_layer_norm(key_states)
652
+
653
+ if attention_mask is not None:
654
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
655
+ raise ValueError(
656
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
657
+ )
658
+
659
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
660
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
661
+ if query_states.device.type == "cuda" and attention_mask is not None:
662
+ query_states = query_states.contiguous()
663
+ key_states = key_states.contiguous()
664
+ value_states = value_states.contiguous()
665
+
666
+ attn_output = nn.functional.scaled_dot_product_attention(
667
+ query_states,
668
+ key_states,
669
+ value_states,
670
+ attn_mask=attention_mask,
671
+ dropout_p=self.dropout,
672
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
673
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
674
+ )
675
+
676
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
677
+ raise ValueError(
678
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
679
+ f" {attn_output.size()}"
680
+ )
681
+
682
+ attn_output = attn_output.transpose(1, 2)
683
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
684
+
685
+ attn_output = self.o_proj(attn_output)
686
+
687
+ attn_weights = None
688
+ if output_attentions:
689
+ logger.warning_once(
690
+ "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
691
+ )
692
+
693
+ return attn_output, attn_weights, past_key_value
694
+
695
+
696
+ # this was adapted from LlamaDecoderLayer
697
+ class IdeficsDecoderLayer(nn.Module):
698
+ def __init__(self, config: IdeficsConfig):
699
+ super().__init__()
700
+ self.hidden_size = config.hidden_size
701
+ self.self_attn = IdeficsAttention(
702
+ hidden_size=self.hidden_size,
703
+ num_heads=config.num_attention_heads,
704
+ dropout=config.dropout,
705
+ config=config,
706
+ )
707
+ self.mlp = IdeficsMLP(
708
+ hidden_size=self.hidden_size,
709
+ intermediate_size=config.intermediate_size,
710
+ hidden_act=config.hidden_act,
711
+ )
712
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
713
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
714
+ self.dropout = config.dropout
715
+
716
+ def forward(
717
+ self,
718
+ hidden_states: torch.Tensor,
719
+ attention_mask: Optional[torch.Tensor] = None,
720
+ position_ids: Optional[torch.LongTensor] = None,
721
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
722
+ output_attentions: Optional[bool] = False,
723
+ use_cache: Optional[bool] = False,
724
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
725
+ """
726
+ Args:
727
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
728
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
729
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
730
+ output_attentions (`bool`, *optional*):
731
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
732
+ returned tensors for more detail.
733
+ use_cache (`bool`, *optional*):
734
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
735
+ (see `past_key_values`).
736
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
737
+ """
738
+
739
+ residual = hidden_states
740
+
741
+ hidden_states = self.input_layernorm(hidden_states)
742
+
743
+ # Self Attention
744
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
745
+ hidden_states=hidden_states,
746
+ attention_mask=attention_mask,
747
+ position_ids=position_ids,
748
+ past_key_value=past_key_value,
749
+ output_attentions=output_attentions,
750
+ use_cache=use_cache,
751
+ )
752
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
753
+ hidden_states = residual + hidden_states
754
+
755
+ # Fully Connected
756
+ residual = hidden_states
757
+ hidden_states = self.post_attention_layernorm(hidden_states)
758
+ hidden_states = self.mlp(hidden_states)
759
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
760
+ hidden_states = residual + hidden_states
761
+
762
+ outputs = (hidden_states,)
763
+
764
+ if output_attentions:
765
+ outputs += (self_attn_weights,)
766
+
767
+ if use_cache:
768
+ outputs += (present_key_value,)
769
+
770
+ return outputs
771
+
772
+
773
+ class IdeficsGatedCrossAttentionLayer(nn.Module):
774
+ def __init__(self, config: IdeficsConfig):
775
+ super().__init__()
776
+ self.hidden_size = config.hidden_size
777
+ self.cross_attn = IdeficsAttention(
778
+ hidden_size=self.hidden_size,
779
+ num_heads=config.num_attention_heads,
780
+ is_cross_attention=True,
781
+ dropout=config.dropout,
782
+ config=config,
783
+ qk_layer_norms=config.qk_layer_norms,
784
+ )
785
+ self.mlp = IdeficsMLP(
786
+ hidden_size=self.hidden_size,
787
+ intermediate_size=config.intermediate_size,
788
+ hidden_act=config.hidden_act,
789
+ )
790
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
791
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
792
+ self.config = config.dropout
793
+
794
+ self.act_cross_attn = nn.Tanh()
795
+ self.act_dense = nn.Tanh()
796
+
797
+ if config.alpha_initializer == "zeros":
798
+ if config.alpha_type == "vector":
799
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
800
+ self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
801
+ elif config.alpha_type == "float":
802
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
803
+ self.alpha_dense = nn.Parameter(torch.zeros(1))
804
+ else:
805
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
806
+
807
+ elif config.alpha_initializer == "ones":
808
+ if config.alpha_type == "vector":
809
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size))
810
+ self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size))
811
+ elif config.alpha_type == "float":
812
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1))
813
+ self.alpha_dense = nn.Parameter(torch.ones(1))
814
+ else:
815
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
816
+
817
+ elif config.alpha_initializer in {"normal", "gaussian", "random"}:
818
+ if config.alpha_type == "vector":
819
+ self.alpha_cross_attn = nn.Parameter(
820
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
821
+ )
822
+ self.alpha_dense = nn.Parameter(
823
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
824
+ )
825
+ elif config.alpha_type == "float":
826
+ self.alpha_cross_attn = nn.Parameter(
827
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1))
828
+ )
829
+ self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1)))
830
+ else:
831
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
832
+
833
+ else:
834
+ raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!")
835
+
836
+ if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
837
+ raise ValueError("Alpha parameters not initialized correctly!")
838
+
839
+ def forward(
840
+ self,
841
+ hidden_states: torch.Tensor,
842
+ attention_mask: Optional[torch.Tensor] = None,
843
+ image_hidden_states: Optional[torch.Tensor] = None,
844
+ image_attention_mask: Optional[torch.Tensor] = None,
845
+ cross_attention_gate: Optional[torch.Tensor] = None,
846
+ output_attentions: Optional[bool] = False,
847
+ use_cache: Optional[bool] = False,
848
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
849
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
850
+ """
851
+ Args:
852
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
853
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
854
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
855
+ image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size
856
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
857
+ cross_attention_gate (`torch.FloatTensor`, *optional*):
858
+ gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending no images.
859
+ output_attentions (`bool`, *optional*):
860
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
861
+ returned tensors for more detail.
862
+ use_cache (`bool`, *optional*):
863
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
864
+ (see `past_key_values`).
865
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
866
+ """
867
+ if image_hidden_states is None:
868
+ raise ValueError(
869
+ "`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
870
+ " conditioned on."
871
+ )
872
+
873
+ if cross_attention_gate is None:
874
+ raise ValueError(
875
+ "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images."
876
+ )
877
+
878
+ if past_key_value is not None:
879
+ raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.")
880
+
881
+ residual = hidden_states
882
+
883
+ hidden_states = self.input_layernorm(hidden_states)
884
+
885
+ # Self Attention
886
+ hidden_states, self_attn_weights, present_key_value = self.cross_attn(
887
+ hidden_states=hidden_states,
888
+ key_value_states=image_hidden_states,
889
+ attention_mask=image_attention_mask,
890
+ output_attentions=output_attentions,
891
+ )
892
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training)
893
+ # Fill in zeros for cross_attention hidden_states of tokens attending to no images
894
+ hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0)
895
+ hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states
896
+
897
+ # Fully Connected
898
+ residual = hidden_states
899
+ hidden_states = self.post_attention_layernorm(hidden_states)
900
+ hidden_states = self.mlp(hidden_states)
901
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training)
902
+ hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
903
+
904
+ outputs = (hidden_states,)
905
+
906
+ if output_attentions:
907
+ outputs += (self_attn_weights,)
908
+
909
+ if use_cache:
910
+ outputs += (present_key_value,)
911
+
912
+ return outputs
913
+
914
+
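The residual update above is worth isolating: the cross-attention (and MLP) output is scaled by an activation of a learned scalar gate before being added back to the residual stream, so a gate value of zero leaves the input untouched. A minimal standalone sketch of that pattern (not the module above; the tanh gating and toy shapes are assumptions for illustration only):

```python
import torch
from torch import nn


class TinyGatedResidual(nn.Module):
    """Toy illustration of the `residual + act(alpha) * block(x)` gating pattern."""

    def __init__(self, dim: int):
        super().__init__()
        self.alpha = nn.Parameter(torch.zeros(1))  # learned gate, starts closed
        self.block = nn.Linear(dim, dim)           # stand-in for cross-attention / MLP

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # With alpha == 0, tanh(alpha) == 0 and the block contributes nothing,
        # so the residual stream passes through unchanged.
        return hidden_states + torch.tanh(self.alpha) * self.block(hidden_states)


x = torch.randn(2, 5, 8)
assert torch.allclose(TinyGatedResidual(8)(x), x)
```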
915
+ LLAMA_START_DOCSTRING = r"""
916
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
917
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
918
+ etc.)
919
+
920
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
921
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
922
+ and behavior.
923
+
924
+ Parameters:
925
+ config ([`IdeficsConfig`]):
926
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
927
+ load the weights associated with the model, only the configuration. Check out the
928
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
929
+ """
930
+
931
+
932
+ @add_start_docstrings(
933
+ "The bare Idefics Model outputting raw hidden-states without any specific head on top.",
934
+ LLAMA_START_DOCSTRING,
935
+ )
936
+ class IdeficsPreTrainedModel(PreTrainedModel):
937
+ config_class = IdeficsConfig
938
+ base_model_prefix = "model"
939
+ supports_gradient_checkpointing = True
940
+ _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
941
+ _supports_sdpa = True
942
+
943
+ def _init_weights(self, module):
944
+ # important: this ported version of Idefics isn't meant for training from scratch - only
945
+ # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
946
+ # base should be used for training from scratch and it contains the correct code.
947
+ std = self.config.initializer_range
948
+ if isinstance(module, nn.Linear):
949
+ module.weight.data.normal_(mean=0.0, std=std)
950
+ if module.bias is not None:
951
+ module.bias.data.zero_()
952
+ elif isinstance(module, nn.Embedding):
953
+ module.weight.data.normal_(mean=0.0, std=std)
954
+ if module.padding_idx is not None:
955
+ module.weight.data[module.padding_idx].zero_()
956
+
957
+ # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
958
+ @classmethod
959
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
960
+ # We remove the checks on `is_torch_sdpa_available()` and `cls._supports_sdpa` as this model supports SDPA from torch==2.0.0 (no requirement on 2.1).
961
+ _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
962
+ if _is_bettertransformer:
963
+ return config
964
+
965
+ if not hard_check_only:
966
+ config._attn_implementation = "sdpa"
967
+ return config
968
+
969
+
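Since the class advertises `_supports_sdpa = True` and `_check_and_enable_sdpa` switches the config to `"sdpa"` whenever BetterTransformer is not in use, SDPA attention can also be requested explicitly at load time. A hedged usage sketch (the checkpoint name is taken from the generation example further down; it assumes a transformers version that accepts `attn_implementation` and torch >= 2.0):

```python
import torch
from transformers import IdeficsForVisionText2Text

# Explicitly request PyTorch scaled-dot-product attention at load time.
model = IdeficsForVisionText2Text.from_pretrained(
    "HuggingFaceM4/idefics-9b",
    torch_dtype=torch.bfloat16,
    attn_implementation="sdpa",
)
```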
970
+ LLAMA_INPUTS_DOCSTRING = r"""
971
+ Args:
972
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
973
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
974
+ it.
975
+
976
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
977
+ [`PreTrainedTokenizer.__call__`] for details.
978
+
979
+ [What are input IDs?](../glossary#input-ids)
980
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
981
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
982
+
983
+ - 1 for tokens that are **not masked**,
984
+ - 0 for tokens that are **masked**.
985
+
986
+ [What are attention masks?](../glossary#attention-mask)
987
+
988
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
989
+ [`PreTrainedTokenizer.__call__`] for details.
990
+
991
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
992
+ `past_key_values`).
993
+
994
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
995
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
996
+ information on the default strategy.
997
+
998
+ - 1 indicates the head is **not masked**,
999
+ - 0 indicates the head is **masked**.
1000
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1001
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1002
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
1003
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1004
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
1005
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
1006
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
1007
+
1008
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1009
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
1010
+
1011
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1012
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1013
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1014
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1015
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1016
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1017
+ model's internal embedding lookup matrix.
1018
+ use_cache (`bool`, *optional*):
1019
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1020
+ `past_key_values`).
1021
+ output_attentions (`bool`, *optional*):
1022
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1023
+ tensors for more detail.
1024
+ output_hidden_states (`bool`, *optional*):
1025
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1026
+ more detail.
1027
+ return_dict (`bool`, *optional*):
1028
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1029
+ """
1030
+
1031
+
1032
+ @add_start_docstrings(
1033
+ "The bare Idefics Model outputting raw hidden-states without any specific head on top.",
1034
+ LLAMA_START_DOCSTRING,
1035
+ )
1036
+ class IdeficsModel(IdeficsPreTrainedModel):
1037
+ """
1038
+ Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is an [`IdeficsDecoderLayer`].
1039
+
1040
+ Args:
1041
+ config: IdeficsConfig
1042
+ """
1043
+
1044
+ def __init__(self, config: IdeficsConfig):
1045
+ super().__init__(config)
1046
+ self.config = config
1047
+ self.padding_idx = config.pad_token_id
1048
+ self.vocab_size = config.vocab_size
1049
+
1050
+ self.embed_tokens = IdeficsDecoupledEmbedding(
1051
+ num_embeddings=config.vocab_size,
1052
+ num_additional_embeddings=config.additional_vocab_size,
1053
+ embedding_dim=config.hidden_size,
1054
+ partially_freeze=config.freeze_text_layers,
1055
+ padding_idx=self.padding_idx,
1056
+ )
1057
+
1058
+ self.image_size = config.vision_config.image_size
1059
+ self.vision_config = config.vision_config
1060
+ self.vision_model = IdeficsVisionTransformer(config.vision_config)
1061
+
1062
+ # Perceiver Resampler
1063
+ if config.use_resampler:
1064
+ perceiver_config = config.perceiver_config
1065
+ self.perceiver_resampler = IdeficsPerceiverResampler(
1066
+ config,
1067
+ config.vision_config.embed_dim,
1068
+ perceiver_config.resampler_depth,
1069
+ perceiver_config.resampler_n_heads,
1070
+ perceiver_config.resampler_head_dim,
1071
+ perceiver_config.resampler_n_latents,
1072
+ )
1073
+
1074
+ self.layers = nn.ModuleList([IdeficsDecoderLayer(config) for _ in range(config.num_hidden_layers)])
1075
+
1076
+ self.cross_layer_interval = config.cross_layer_interval
1077
+ num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
1078
+ self.gated_cross_attn_layers = nn.ModuleList(
1079
+ [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)]
1080
+ )
1081
+ self.gradient_checkpointing = False
1082
+
1083
+ self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1084
+
1085
+ # Initialize weights and apply final processing
1086
+ self.post_init()
1087
+
1088
+ self.freeze_relevant_params(config)
1089
+
1090
+ def freeze_relevant_params(self, config=None):
1091
+ if config is None:
1092
+ config = self.config
1093
+
1094
+ if config.freeze_text_layers:
1095
+ self.freeze_text_layers(config.freeze_text_module_exceptions)
1096
+
1097
+ if config.freeze_vision_layers:
1098
+ freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
1099
+
1100
+ def freeze_text_layers(self, module_exceptions=[]):
1101
+ for module in [self.layers, self.norm]:
1102
+ freeze_model(module, module_exceptions=module_exceptions)
1103
+
1104
+ def freeze_vision_layers(self, module_exceptions=[]):
1105
+ freeze_model(self.vision_model, module_exceptions=module_exceptions)
1106
+
1107
+ def get_input_embeddings(self):
1108
+ return self.embed_tokens
1109
+
1110
+ def set_input_embeddings(self, value):
1111
+ self.embed_tokens = value
1112
+
1113
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1114
+ def forward(
1115
+ self,
1116
+ input_ids: torch.LongTensor = None,
1117
+ attention_mask: Optional[torch.Tensor] = None,
1118
+ position_ids: Optional[torch.LongTensor] = None,
1119
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1120
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1121
+ pixel_values: Optional[torch.FloatTensor] = None,
1122
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
1123
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
1124
+ image_attention_mask: Optional[torch.Tensor] = None,
1125
+ use_cache: Optional[bool] = None,
1126
+ output_attentions: Optional[bool] = None,
1127
+ output_hidden_states: Optional[bool] = None,
1128
+ interpolate_pos_encoding: Optional[bool] = False,
1129
+ return_dict: Optional[bool] = None,
1130
+ ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]:
1131
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1132
+
1133
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1134
+ output_hidden_states = (
1135
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1136
+ )
1137
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1138
+
1139
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1140
+
1141
+ # retrieve input_ids and inputs_embeds
1142
+ if input_ids is not None and inputs_embeds is not None:
1143
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
1144
+ elif input_ids is not None:
1145
+ batch_size, seq_length = input_ids.shape
1146
+ elif inputs_embeds is not None:
1147
+ batch_size, seq_length, _ = inputs_embeds.shape
1148
+ else:
1149
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
1150
+
1151
+ seq_length_with_past = seq_length
1152
+ past_key_values_length = 0
1153
+
1154
+ if past_key_values is not None:
1155
+ past_key_values_length = past_key_values[0][0].shape[2]
1156
+ seq_length_with_past = seq_length_with_past + past_key_values_length
1157
+
1158
+ if attention_mask is not None and position_ids is None:
1159
+ # create position_ids on the fly for batch generation
1160
+ position_ids = attention_mask.long().cumsum(-1) - 1
1161
+ position_ids.masked_fill_(attention_mask == 0, 1)
1162
+ elif position_ids is None:
1163
+ position_ids = torch.arange(
1164
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1165
+ )
1166
+ position_ids = position_ids.unsqueeze(0)
1167
+
1168
+ if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2:
1169
+ raise ValueError(
1170
+ "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
1171
+ )
1172
+
1173
+ elif pixel_values is not None:
1174
+ pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility
1175
+ batch_size, num_images = pixel_values.shape[:2]
1176
+ pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
1177
+
1178
+ # Get sequence from the vision encoder
1179
+ image_hidden_states = self.vision_model(
1180
+ pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
1181
+ ).last_hidden_state
1182
+
1183
+ elif image_encoder_embeddings is not None:
1184
+ batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
1185
+ image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
1186
+ image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)
1187
+
1188
+ if self.config.use_resampler:
1189
+ if perceiver_embeddings is None:
1190
+ perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
1191
+ image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2)
1192
+ else:
1193
+ batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
1194
+ image_hidden_states = perceiver_embeddings
1195
+ elif perceiver_embeddings is None:
1196
+ image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
1197
+ else:
1198
+ raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True")
1199
+
1200
+ image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
1201
+ # # Hack to use the model in full language modeling mode
1202
+ # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
1203
+ # Make image_attention_mask compatible with hidden states
1204
+ text_seq_len = image_attention_mask.size(1)
1205
+ image_attention_mask = image_attention_mask.unsqueeze(-1)
1206
+ image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
1207
+ image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
1208
+
1209
+ if image_hidden_states is not None:
1210
+ image_batch_size, image_sequence_length, _ = image_hidden_states.size()
1211
+ image_hidden_shape = (image_batch_size, image_sequence_length)
1212
+ if image_attention_mask is None:
1213
+ image_attention_mask = torch.ones(image_hidden_shape, device=device)
1214
+ image_attention_mask = self.invert_attention_mask(image_attention_mask)
1215
+ else:
1216
+ image_attention_mask = None
1217
+
1218
+ # cross_attention_gate:
1219
+ # For any tokens attending to no images, the hidden_states coming out of the cross-attention should be zeroed-out.
1220
+ # `image_attention_mask` has shape [bsz, 1, num_images, hidden_size] with elements equal to either 0.0 or a very negative number.
1221
+ # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0.
1222
+ # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0.
1223
+ cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to(
1224
+ device
1225
+ )
1226
+
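To make the gate computation concrete, here is a toy example using the inverted-mask convention above (0.0 means "may attend", a very negative value means "masked"); the shapes and values are illustrative assumptions, not taken from a real batch:

```python
import torch

# (bsz=1, 1, seq_len=3, num_image_tokens=2), already inverted:
inverted_mask = torch.tensor([[[[0.0, -1e9],     # token 0 attends image token 0 -> gate 1
                                [-1e9, -1e9],    # token 1 attends nothing       -> gate 0
                                [0.0, 0.0]]]])   # token 2 attends both          -> gate 1

gate = (inverted_mask == 0.0).any(dim=-1).to(torch.float32).squeeze(dim=1)
print(gate)  # tensor([[1., 0., 1.]]), shape (bsz, seq_len)
```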
1227
+ if inputs_embeds is None:
1228
+ inputs_embeds = self.embed_tokens(input_ids)
1229
+ # embed positions
1230
+ if attention_mask is None:
1231
+ attention_mask = torch.ones(
1232
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
1233
+ )
1234
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1235
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1236
+ )
1237
+
1238
+ hidden_states = inputs_embeds
1239
+
1240
+ if self.gradient_checkpointing and self.training:
1241
+ if use_cache:
1242
+ logger.warning_once(
1243
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1244
+ )
1245
+ use_cache = False
1246
+
1247
+ # decoder layers
1248
+ all_hidden_states = () if output_hidden_states else None
1249
+ all_self_attns = () if output_attentions else None
1250
+ next_decoder_cache = () if use_cache else None
1251
+
1252
+ for idx, decoder_layer in enumerate(self.layers):
1253
+ if output_hidden_states:
1254
+ all_hidden_states += (hidden_states,)
1255
+
1256
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
1257
+
1258
+ def vblock(
1259
+ main_block,
1260
+ hidden_states,
1261
+ attention_mask,
1262
+ position_ids,
1263
+ past_key_value,
1264
+ image_hidden_states,
1265
+ image_attention_mask,
1266
+ cross_attention_gate,
1267
+ output_attentions,
1268
+ use_cache,
1269
+ layer_idx,
1270
+ cross_layer_interval,
1271
+ gated_cross_attn_layers,
1272
+ ):
1273
+ # TODO(ls): Add cross attention values to respective lists
1274
+ if layer_idx % cross_layer_interval == 0:
1275
+ xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
1276
+ outputs = xblock(
1277
+ hidden_states,
1278
+ attention_mask=attention_mask,
1279
+ image_hidden_states=image_hidden_states,
1280
+ image_attention_mask=image_attention_mask,
1281
+ cross_attention_gate=cross_attention_gate,
1282
+ output_attentions=output_attentions,
1283
+ use_cache=use_cache,
1284
+ past_key_value=None, # not implemented
1285
+ )
1286
+ hidden_states = outputs[0]
1287
+
1288
+ layer_outputs = main_block(
1289
+ hidden_states,
1290
+ attention_mask=attention_mask,
1291
+ position_ids=position_ids,
1292
+ past_key_value=past_key_value,
1293
+ output_attentions=output_attentions,
1294
+ use_cache=use_cache,
1295
+ )
1296
+
1297
+ return layer_outputs
1298
+
1299
+ if self.gradient_checkpointing and self.training:
1300
+ past_key_value = None
1301
+ if use_cache:
1302
+ logger.warning_once(
1303
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1304
+ )
1305
+ use_cache = False
1306
+
1307
+ layer_outputs = self._gradient_checkpointing_func(
1308
+ vblock,
1309
+ decoder_layer,
1310
+ hidden_states,
1311
+ attention_mask,
1312
+ position_ids,
1313
+ past_key_value,
1314
+ image_hidden_states,
1315
+ image_attention_mask,
1316
+ cross_attention_gate,
1317
+ output_attentions,
1318
+ use_cache,
1319
+ idx,
1320
+ self.cross_layer_interval,
1321
+ self.gated_cross_attn_layers,
1322
+ )
1323
+ else:
1324
+ layer_outputs = vblock(
1325
+ decoder_layer,
1326
+ hidden_states,
1327
+ attention_mask=attention_mask,
1328
+ position_ids=position_ids,
1329
+ past_key_value=past_key_value,
1330
+ image_hidden_states=image_hidden_states,
1331
+ image_attention_mask=image_attention_mask,
1332
+ cross_attention_gate=cross_attention_gate,
1333
+ output_attentions=output_attentions,
1334
+ use_cache=use_cache,
1335
+ layer_idx=idx,
1336
+ cross_layer_interval=self.cross_layer_interval,
1337
+ gated_cross_attn_layers=self.gated_cross_attn_layers,
1338
+ )
1339
+
1340
+ hidden_states = layer_outputs[0]
1341
+
1342
+ if use_cache:
1343
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
1344
+
1345
+ if output_attentions:
1346
+ all_self_attns += (layer_outputs[1],)
1347
+
1348
+ hidden_states = self.norm(hidden_states)
1349
+
1350
+ # add hidden states from the last decoder layer
1351
+ if output_hidden_states:
1352
+ all_hidden_states += (hidden_states,)
1353
+
1354
+ next_cache = next_decoder_cache if use_cache else None
1355
+ image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
1356
+ if not return_dict:
1357
+ return tuple(
1358
+ v
1359
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
1360
+ if v is not None
1361
+ )
1362
+ return IdeficsBaseModelOutputWithPast(
1363
+ last_hidden_state=hidden_states,
1364
+ past_key_values=next_cache,
1365
+ hidden_states=all_hidden_states,
1366
+ attentions=all_self_attns,
1367
+ image_hidden_states=image_hidden_states,
1368
+ )
1369
+
1370
+
1371
+ class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
1372
+ _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
1373
+ _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"]
1374
+
1375
+ def __init__(self, config, vision_model=None):
1376
+ super().__init__(config)
1377
+ self.model = IdeficsModel(config)
1378
+
1379
+ self.lm_head = IdeficsDecoupledLinear(
1380
+ in_features=config.hidden_size,
1381
+ out_features=config.vocab_size,
1382
+ out_additional_features=config.additional_vocab_size,
1383
+ bias=False,
1384
+ partially_freeze=config.freeze_lm_head,
1385
+ )
1386
+
1387
+ # Initialize weights and apply final processing
1388
+ self.post_init()
1389
+
1390
+ def get_input_embeddings(self):
1391
+ return self.model.embed_tokens
1392
+
1393
+ def set_input_embeddings(self, value):
1394
+ self.model.embed_tokens = value
1395
+
1396
+ def get_output_embeddings(self):
1397
+ return self.lm_head
1398
+
1399
+ def set_output_embeddings(self, new_embeddings):
1400
+ self.lm_head = new_embeddings
1401
+
1402
+ def set_decoder(self, decoder):
1403
+ self.model = decoder
1404
+
1405
+ def get_decoder(self):
1406
+ return self.model
1407
+
1408
+ def tie_weights(self):
1409
+ """
1410
+ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
1411
+ IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
1412
+ """
1413
+ output_embeddings = self.get_output_embeddings()
1414
+ input_embeddings = self.get_input_embeddings()
1415
+
1416
+ if getattr(self.config, "tie_word_embeddings", True):
1417
+ output_embeddings.weight = input_embeddings.weight
1418
+ if input_embeddings.num_additional_embeddings > 0:
1419
+ assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
1420
+ output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight
1421
+
1422
+ if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
1423
+ output_embeddings.out_features = input_embeddings.num_embeddings
1424
+ if hasattr(output_embeddings, "out_additional_features") and hasattr(
1425
+ input_embeddings, "num_additional_embeddings"
1426
+ ):
1427
+ output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings
1428
+
1429
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1430
+ @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1431
+ def forward(
1432
+ self,
1433
+ input_ids: torch.LongTensor = None,
1434
+ attention_mask: Optional[torch.Tensor] = None,
1435
+ position_ids: Optional[torch.LongTensor] = None,
1436
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1437
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1438
+ pixel_values: Optional[torch.FloatTensor] = None,
1439
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
1440
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
1441
+ image_attention_mask: Optional[torch.Tensor] = None,
1442
+ labels: Optional[torch.LongTensor] = None,
1443
+ use_cache: Optional[bool] = None,
1444
+ output_attentions: Optional[bool] = None,
1445
+ output_hidden_states: Optional[bool] = None,
1446
+ interpolate_pos_encoding: Optional[bool] = False,
1447
+ return_dict: Optional[bool] = None,
1448
+ ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]:
1449
+ r"""
1450
+ Args:
1451
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1452
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1453
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1454
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1455
+
1456
+ Returns:
1457
+
1458
+ Example:
1459
+
1460
+ ```python
1461
+ >>> from transformers import AutoProcessor, IdeficsForVisionText2Text
1462
+
1463
+ >>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
1464
+ >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
1465
+
1466
+ >>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
1467
+ >>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"
1468
+
1469
+ >>> prompts = [
1470
+ ... [
1471
+ ... "User:",
1472
+ ... dogs_image_url_1,
1473
+ ... "Describe this image.\nAssistant: An image of two dogs.\n",
1474
+ ... "User:",
1475
+ ... dogs_image_url_2,
1476
+ ... "Describe this image.\nAssistant:",
1477
+ ... ]
1478
+ ... ]
1479
+ >>> inputs = processor(prompts, return_tensors="pt")
1480
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=6)
1481
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True)
1482
+ ```"""
1483
+
1484
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1485
+ output_hidden_states = (
1486
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1487
+ )
1488
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1489
+
1490
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1491
+ outputs = self.model(
1492
+ input_ids=input_ids,
1493
+ attention_mask=attention_mask,
1494
+ position_ids=position_ids,
1495
+ past_key_values=past_key_values,
1496
+ inputs_embeds=inputs_embeds,
1497
+ pixel_values=pixel_values,
1498
+ image_encoder_embeddings=image_encoder_embeddings,
1499
+ perceiver_embeddings=perceiver_embeddings,
1500
+ image_attention_mask=image_attention_mask,
1501
+ use_cache=use_cache,
1502
+ output_attentions=output_attentions,
1503
+ output_hidden_states=output_hidden_states,
1504
+ interpolate_pos_encoding=interpolate_pos_encoding,
1505
+ return_dict=return_dict,
1506
+ )
1507
+
1508
+ hidden_states = outputs[0]
1509
+ logits = self.lm_head(hidden_states)
1510
+
1511
+ loss = None
1512
+ if labels is not None:
1513
+ labels = labels.to(logits.device)
1514
+ # Shift so that tokens < n predict n
1515
+ if attention_mask is not None:
1516
+ shift_attention_mask = attention_mask[..., 1:].to(logits.device)
1517
+ shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
1518
+ shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
1519
+ else:
1520
+ shift_logits = logits[..., :-1, :].contiguous()
1521
+ shift_labels = labels[..., 1:].contiguous()
1522
+ # Flatten the tokens
1523
+ loss_fct = CrossEntropyLoss()
1524
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1525
+
1526
+ if not return_dict:
1527
+ output = (logits,) + outputs[1:]
1528
+ return (loss,) + output if loss is not None else output
1529
+
1530
+ return IdeficsCausalLMOutputWithPast(
1531
+ loss=loss,
1532
+ logits=logits,
1533
+ past_key_values=outputs.past_key_values,
1534
+ hidden_states=outputs.hidden_states,
1535
+ attentions=outputs.attentions,
1536
+ image_hidden_states=outputs.image_hidden_states,
1537
+ )
1538
+
1539
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1540
+ image_hidden_states = kwargs.pop("image_hidden_states", None)
1541
+ if image_hidden_states is not None:
1542
+ if self.config.use_resampler:
1543
+ kwargs["perceiver_embeddings"] = image_hidden_states
1544
+ else:
1545
+ kwargs["image_encoder_embeddings"] = image_hidden_states
1546
+ kwargs["pixel_values"] = None
1547
+ inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
1548
+ unwanted_kwargs = ["token_type_ids"]
1549
+ for kwarg in unwanted_kwargs:
1550
+ inputs.pop(kwarg, None)
1551
+ return inputs
1552
+
1553
+ @staticmethod
1554
+ def _expand_inputs_for_generation(
1555
+ *args,
1556
+ **model_kwargs,
1557
+ ):
1558
+ return expand_inputs_for_generation(*args, **model_kwargs)
1559
+
1560
+ def _update_model_kwargs_for_generation(
1561
+ self,
1562
+ outputs: ModelOutput,
1563
+ model_kwargs: Dict[str, Any],
1564
+ is_encoder_decoder: bool = False,
1565
+ standardize_cache_format: bool = False,
1566
+ ) -> Dict[str, Any]:
1567
+ model_kwargs = super()._update_model_kwargs_for_generation(
1568
+ outputs,
1569
+ model_kwargs,
1570
+ is_encoder_decoder,
1571
+ standardize_cache_format,
1572
+ )
1573
+
1574
+ if "image_attention_mask" in model_kwargs:
1575
+ image_attention_mask = model_kwargs["image_attention_mask"]
1576
+ last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
1577
+ model_kwargs["image_attention_mask"] = last_mask
1578
+
1579
+ # Get the precomputed image_hidden_states
1580
+ model_kwargs["image_hidden_states"] = outputs.image_hidden_states
1581
+ return model_kwargs
1582
+
1583
+ @staticmethod
1584
+ def _reorder_cache(past, beam_idx):
1585
+ reordered_past = ()
1586
+ for layer_past in past:
1587
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1588
+ return reordered_past
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py ADDED
@@ -0,0 +1,408 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for IDEFICS.
17
+ """
18
+
19
+ from typing import Callable, List, Optional, Union
20
+ from urllib.parse import urlparse
21
+
22
+ from ...feature_extraction_utils import BatchFeature
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
25
+ from ...utils import TensorType, is_torch_available
26
+
27
+
28
+ if is_torch_available():
29
+ import torch
30
+
31
+
32
+ IMAGE_TOKEN = "<image>"
33
+
34
+
35
+ # copied from m4.training.packing
36
+ def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
37
+ # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]
38
+
39
+ # If any image index is >= num_classes, set it to -1.
40
+ # Tokens appearing after the maximum allowed number of images have been seen don't attend to any image.
41
+ if num_classes != -1:
42
+ incremental_mask[incremental_mask >= num_classes] = -1
43
+
44
+ negatives = incremental_mask == -1
45
+ incremental_mask[negatives] = 0
46
+ attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
47
+ attn_mask[negatives, :] = 0
48
+ return attn_mask
49
+
50
+
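A small worked example of the conversion described in the comment above (note that the function modifies `incremental_mask` in place, hence the `.clone()`); the input values are illustrative:

```python
import torch

incremental_mask = torch.tensor([[-1, 0, 0, 1]])  # -1: before any image, 0/1: image index
binary = incremental_to_binary_attention_mask(incremental_mask.clone(), num_classes=2)
print(binary)
# tensor([[[0, 0],    # -1 -> attends to no image
#          [1, 0],    #  0 -> attends to image 0
#          [1, 0],
#          [0, 1]]])  #  1 -> attends to image 1
```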
51
+ # copied from m4.training.packing
52
+ def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
53
+ image_attention_mask = torch.full_like(input_ids, fill_value=-1)
54
+ next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
55
+ image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
56
+ eod_token_id = tokenizer.eos_token_id
57
+ for batch_idx in range(input_ids.size(0)):
58
+ count = -1
59
+ seen_eod = False
60
+ for idx, token_id in enumerate(input_ids[batch_idx]):
61
+ if token_id == image_token_id:
62
+ count += 1
63
+ image_attention_mask[batch_idx][idx] = count
64
+ seen_eod = False
65
+ else:
66
+ image_attention_mask[batch_idx][idx] = count
67
+
68
+ if seen_eod:
69
+ image_attention_mask[batch_idx][idx] = -1
70
+
71
+ if token_id == eod_token_id:
72
+ seen_eod = True
73
+
74
+ for batch_idx in range(input_ids.size(0)):
75
+ count = -1
76
+ seen_eod = False
77
+ for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
78
+ token_id = input_ids[batch_idx][idx]
79
+ if token_id == image_token_id:
80
+ count += 1
81
+ next_image_attention_mask[batch_idx][idx] = count
82
+ seen_eod = False
83
+ else:
84
+ next_image_attention_mask[batch_idx][idx] = count
85
+
86
+ if token_id == eod_token_id:
87
+ seen_eod = True
88
+
89
+ if seen_eod:
90
+ next_image_attention_mask[batch_idx][idx] = -1
91
+
92
+ non_negative_indices = next_image_attention_mask[batch_idx] != -1
93
+ next_image_attention_mask[batch_idx][non_negative_indices] -= count
94
+ next_image_attention_mask[batch_idx][non_negative_indices] *= -1
95
+
96
+ return image_attention_mask, next_image_attention_mask
97
+
98
+
99
+ def is_url(string):
100
+ """Checks if the passed string contains a valid url and nothing else. E.g. if a space is included, the url is
101
+ immediately invalidated."""
102
+ if " " in string:
103
+ return False
104
+ result = urlparse(string)
105
+ return all([result.scheme, result.netloc])
106
+
107
+
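A few illustrative checks against the rule above (a scheme and a netloc are both required, and any space disqualifies the string); the second and third strings are made up for illustration:

```python
assert is_url("https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg")
assert not is_url("dog-puns-1581708208.jpg")              # no scheme / netloc
assert not is_url("https://example.com/two kittens.jpg")  # contains a space
```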
108
+ class IdeficsProcessor(ProcessorMixin):
109
+ r"""
110
+ Constructs a IDEFICS processor which wraps a LLama tokenizer and IDEFICS image processor into a single processor.
111
+
112
+ [`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
113
+ the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
114
+
115
+ Args:
116
+ image_processor (`IdeficsImageProcessor`):
117
+ An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
118
+ tokenizer (`LlamaTokenizerFast`):
119
+ An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
120
+ image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
121
+ """
122
+
123
+ attributes = ["image_processor", "tokenizer"]
124
+ image_processor_class = "IdeficsImageProcessor"
125
+ tokenizer_class = "LlamaTokenizerFast"
126
+
127
+ def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
128
+ if image_processor is None:
129
+ raise ValueError("You need to specify an `image_processor`.")
130
+ if tokenizer is None:
131
+ raise ValueError("You need to specify a `tokenizer`.")
132
+
133
+ super().__init__(image_processor, tokenizer)
134
+ self.current_processor = self.image_processor
135
+ self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
136
+
137
+ self.default_image_dims = (
138
+ self.image_processor.image_num_channels,
139
+ self.image_processor.image_size,
140
+ self.image_processor.image_size,
141
+ )
142
+
143
+ self.tokenizer_was_trained_with_end_of_utterance_token = (
144
+ True
145
+ if "<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
146
+ else False
147
+ )
148
+
149
+ def __call__(
150
+ self,
151
+ prompts: Union[List[TextInput], List[List[TextInput]]],
152
+ padding: Union[bool, str, PaddingStrategy] = "longest",
153
+ truncation: Union[bool, str, TruncationStrategy] = None,
154
+ max_length: Optional[int] = None,
155
+ transform: Callable = None,
156
+ add_eos_token=False,
157
+ add_end_of_utterance_token=None,
158
+ debug=False,
159
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
160
+ ) -> BatchEncoding:
161
+ """This method takes batched or non-batched prompts made of text and images, converts them into the prompt
162
+ format the model was trained on, and prepares the image pixel values for the model to process.
163
+
164
+ Args:
165
+ prompts (`Union[List[TextInput], [List[List[TextInput]]]]`):
166
+ either a single prompt or a batched list of prompts - see the detailed description immediately after
167
+ the end of the arguments doc section.
168
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `"longest"`):
169
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
170
+ index) among:
171
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
172
+ sequence if provided).
173
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
174
+ acceptable input length for the model if that argument is not provided.
175
+ - `False` or `'do_not_pad'`: No padding. This will raise an error if the input sequences are of different
176
+ lengths.
177
+ Note: Unlike most processors, which set padding=`False` by default, `IdeficsProcessor` sets `padding="longest"`
178
+ by default. See https://github.com/huggingface/transformers/pull/29449#pullrequestreview-1925576061 for why.
179
+ max_length (`int`, *optional*):
180
+ Maximum length of the returned list and optionally padding length (see above).
181
+ truncation (`bool`, *optional*):
182
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
183
+ transform (`Callable`, *optional*):
184
+ A custom transform function that accepts a single image can be passed for training. For example,
185
+ `torchvision.Compose` can be used to compose multiple functions. If `None` a preset inference-specific
186
+ set of transforms will be applied to the images
187
+ add_eos_token (`bool`, *optional*, defaults to `False`):
188
+ Adds `eos_token` at the end of the final prompt if True`
189
+ add_end_of_utterance_token (`bool`, *optional*)
190
+ Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
191
+ image). If `None` the tokenizer will be checked instead and if this token is found in
192
+ `additional_special_tokens` then the value will be `True`.
193
+ debug (`bool`, *optional*, defaults to `False`):
194
+ `True` value will help debug prompt generation by dumping useful information
195
+ return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
196
+ The type of tensors to return. Can be one of:
197
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
198
+
199
+ Returns:
200
+ a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
201
+ directly passed to `model.generate`
202
+
203
+ Detailed explanation:
204
+
205
+ Each entry in `prompts` is either a text to be passed as is or an image that will be processed.
206
+
207
+ An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
208
+
209
+ When the processor encounters an image it'll inject a `<fake_token_around_image><image><fake_token_around_image>`
210
+ entry into the prompt.
211
+
212
+ Example:
213
+
214
+ ```python
215
+ checkpoint = "HuggingFaceM4/idefics-9b"
216
+ processor = AutoProcessor.from_pretrained(checkpoint)
217
+ url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
218
+ img = processor.image_processor.fetch_images([url])[0]
219
+
220
+ prompts = [
221
+ "User:",
222
+ img,
223
+ "Describe this image.\nAssistant: An image of two kittens in grass.\n",
224
+ "User:",
225
+ "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
226
+ "Describe this image.\nAssistant:",
227
+ ]
228
+
229
+ inputs = processor(prompts, return_tensors="pt")
230
+ generated_ids = model.generate(**inputs, max_length=100)
231
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
232
+ ```
233
+
234
+ In this example the `prompts` will be converted into:
235
+
236
+ ```
237
+ <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
238
+ Assistant: An image of two kittens in grass.
239
+ User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
240
+ Assistant:'
241
+ ```
242
+
243
+ and the two images will be processed using the [`IdeficsImageProcessor.__call__`] method and placed inside the
244
+ `pixel_values` dict entry of the return value.
245
+
246
+ This example also shows that images can be passed either as objects or as text urls. It can be seen that the
247
+ first image is passed as an object and the second one as a url.
248
+
249
+ For training, do:
250
+
251
+ ```python
252
+ image_transform = transforms.Compose(
253
+ [
254
+ transforms.RandomResizedCrop(
255
+ (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
256
+ ),
257
+ transforms.ToTensor(),
258
+ transforms.Normalize(mean=self.image_mean, std=self.image_std),
259
+ ]
260
+ )
261
+ inputs = processor(prompts, transform=image_transform, return_tensors="pt")
262
+ ```
263
+
264
+ To help debug prompt generation, enable `debug=True`, which will show you what's happening.
265
+
266
+ """
267
+
268
+ # if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
269
+ if add_end_of_utterance_token is None:
270
+ add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
271
+
272
+ # turn non-batched prompts into batched
273
+ if not any(isinstance(i, list) for i in prompts):
274
+ prompts = [prompts]
275
+
276
+ fake_token = "<fake_token_around_image>"
277
+ image_token = "<image>"
278
+ end_of_utterance_token = "<end_of_utterance>"
279
+
280
+ def image_tokens(last_was_image):
281
+ if last_was_image:
282
+ return image_token + fake_token
283
+ else:
284
+ return fake_token + image_token + fake_token
285
+
286
+ all_prompts = []
287
+ all_images = []
288
+ for sample in prompts:
289
+ # the model was trained on samples starting with <s>
290
+ full_text = f"{self.tokenizer.bos_token}"
291
+
292
+ # an image can either be an image object in the item or the url, everything else is a verbatim prompt text
293
+ image_objects = []
294
+ last_was_image = False
295
+ last_was_text = False
296
+ for i, item in enumerate(sample):
297
+ if i > 0:
298
+ last_was_text = True if not last_was_image else False
299
+
300
+ if isinstance(item, str):
301
+ item = item.strip(" ")
302
+ if is_url(item):
303
+ image = self.image_processor.fetch_images(item)
304
+ full_text += image_tokens(last_was_image)
305
+ image_objects.append(image)
306
+ last_was_image = True
307
+ else:
308
+ # we add end_of_utterance_token between subsequent text prompts (but not after the last one!)
309
+ if add_end_of_utterance_token and last_was_text:
310
+ full_text += end_of_utterance_token
311
+ full_text += item
312
+ last_was_image = False
313
+ else:
314
+ # must be an image obj
315
+ full_text += image_tokens(last_was_image)
316
+ image_objects.append(item)
317
+ last_was_image = True
318
+
319
+ if add_eos_token:
320
+ full_text += self.tokenizer.eos_token
321
+
322
+ if debug is True:
323
+ print(f"{full_text=}")
324
+
325
+ image_objects = self.image_processor(image_objects, transform=transform)
326
+
327
+ all_prompts.append(full_text)
328
+ all_images.append(image_objects)
329
+
330
+ text_encoding = self.tokenizer(
331
+ text=all_prompts,
332
+ add_special_tokens=False,
333
+ padding=padding,
334
+ truncation=truncation,
335
+ max_length=max_length,
336
+ )
337
+ all_texts = text_encoding["input_ids"]
338
+ all_attention_masks = text_encoding["attention_mask"]
339
+
340
+ # max_num_images has to be at least 1 even when there are no images
341
+ max_num_images = max(len(x) for x in all_images)
342
+ max_num_images = max(1, max_num_images)
343
+
344
+ at_least_one_image = sum(len(x) for x in all_images) > 0
345
+ output_input_ids = []
346
+ output_images = []
347
+ output_attention_masks = []
348
+ for text, attention_mask, images in zip(all_texts, all_attention_masks, all_images):
349
+ padded_input_ids = text
350
+
351
+ image_count = padded_input_ids.count(self.image_token_id)
352
+ local_max_num_images = min(image_count, max_num_images)
353
+
354
+ current_images = images[:local_max_num_images]
355
+
356
+ if len(current_images) > 0:
357
+ padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
358
+ padded_image_tensor[: current_images.size(0)] = current_images
359
+ else:
360
+ padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
361
+
362
+ output_images.append(padded_image_tensor)
363
+ output_input_ids.append(torch.tensor(padded_input_ids))
364
+ output_attention_masks.append(torch.tensor(attention_mask))
365
+
366
+ output_input_ids = torch.stack(output_input_ids)
367
+ output_images = torch.stack(output_images)
368
+ output_attention_masks = torch.stack(output_attention_masks)
369
+
370
+ if at_least_one_image:
371
+ image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer)
372
+ image_attention_mask = incremental_to_binary_attention_mask(
373
+ image_attention_mask, num_classes=max_num_images
374
+ )
375
+ else:
376
+ # in full language mode we set the image mask to all-0s
377
+ image_attention_mask = torch.zeros(
378
+ output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
379
+ )
380
+
381
+ return BatchFeature(
382
+ data={
383
+ "input_ids": output_input_ids,
384
+ "attention_mask": output_attention_masks,
385
+ "pixel_values": output_images,
386
+ "image_attention_mask": image_attention_mask,
387
+ }
388
+ )
389
+
390
+ def batch_decode(self, *args, **kwargs):
391
+ """
392
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
393
+ refer to the docstring of this method for more information.
394
+ """
395
+ return self.tokenizer.batch_decode(*args, **kwargs)
396
+
397
+ def decode(self, *args, **kwargs):
398
+ """
399
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
400
+ the docstring of this method for more information.
401
+ """
402
+ return self.tokenizer.decode(*args, **kwargs)
403
+
404
+ @property
405
+ def model_input_names(self):
406
+ tokenizer_input_names = self.tokenizer.model_input_names
407
+ image_processor_input_names = self.image_processor.model_input_names
408
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/vision.py ADDED
@@ -0,0 +1,490 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
16
+
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
28
+ from ...utils import ModelOutput, logging
29
+ from .configuration_idefics import IdeficsVisionConfig
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ @dataclass
36
+ class IdeficsVisionModelOutput(ModelOutput):
37
+ """
38
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
39
+
40
+ Args:
41
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
42
+ The image embeddings obtained by applying the projection layer to the pooler_output.
43
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
44
+ Sequence of hidden-states at the output of the last layer of the model.
45
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
46
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
47
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
48
+
49
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
50
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
51
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
52
+ sequence_length)`.
53
+
54
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
55
+ heads.
56
+ """
57
+
58
+ image_embeds: Optional[torch.FloatTensor] = None
59
+ last_hidden_state: torch.FloatTensor = None
60
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
61
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
62
+
63
+
64
+ # Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings
65
+ class IdeficsVisionEmbeddings(nn.Module):
66
+ def __init__(self, config: IdeficsVisionConfig):
67
+ super().__init__()
68
+ self.config = config
69
+ self.embed_dim = config.hidden_size
70
+ self.image_size = config.image_size
71
+ self.patch_size = config.patch_size
72
+
73
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
74
+
75
+ self.patch_embedding = nn.Conv2d(
76
+ in_channels=config.num_channels,
77
+ out_channels=self.embed_dim,
78
+ kernel_size=self.patch_size,
79
+ stride=self.patch_size,
80
+ bias=False,
81
+ )
82
+
83
+ self.num_patches = (self.image_size // self.patch_size) ** 2
84
+ self.num_positions = self.num_patches + 1
85
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
86
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
87
+
88
+ # Heavily inspired from https://github.com/huggingface/transformers/blob/v4.33.0/src/transformers/models/vit/modeling_vit.py#L82
89
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
90
+ """
91
+ This method allows interpolating the pre-trained position encodings, so that the model can be used on
92
+ higher-resolution images.
93
+
94
+ Source:
95
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
96
+ """
97
+
98
+ num_patches = embeddings.shape[1] - 1
99
+ pos_embed = self.position_embedding(self.position_ids)
100
+ num_positions = pos_embed.shape[1] - 1
101
+ if num_patches == num_positions and height == width:
102
+ return pos_embed
103
+ class_pos_embed = pos_embed[:, 0]
104
+ patch_pos_embed = pos_embed[:, 1:]
105
+
106
+ embed_dim = embeddings.shape[-1]
107
+ num_h_patches = height // self.config.patch_size
108
+ num_w_patches = width // self.config.patch_size
109
+ # we add a small number to avoid floating point error in the interpolation
110
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
111
+ num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
112
+ sqrt_num_positions = math.sqrt(num_positions)
113
+ patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
114
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
115
+ fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
116
+ if fp32_upcasting:
117
+ logger.warning_once(
118
+ "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate "
119
+ "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead."
120
+ )
121
+ patch_pos_embed = patch_pos_embed.to(torch.float)
122
+ patch_pos_embed = nn.functional.interpolate(
123
+ patch_pos_embed,
124
+ scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
125
+ mode="bicubic",
126
+ align_corners=False,
127
+ )
128
+ if fp32_upcasting:
129
+ patch_pos_embed = patch_pos_embed.to(torch.bfloat16)
130
+ if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
131
+ raise ValueError(
132
+ f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
133
+ f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
134
+ )
135
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
136
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
137
+
138
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
139
+ batch_size, num_channels, height, width = pixel_values.shape
140
+ if not interpolate_pos_encoding:
141
+ if height != self.image_size or width != self.image_size:
142
+ raise ValueError(
143
+ f"Input image size ({height}*{width}) doesn't match model"
144
+ f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`"
145
+ )
146
+
147
+ target_dtype = self.patch_embedding.weight.dtype
148
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
149
+
150
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
151
+
152
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
153
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
154
+
155
+ # add positional encoding to each token
156
+ if interpolate_pos_encoding:
157
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
158
+ else:
159
+ embeddings = embeddings + self.position_embedding(self.position_ids)
160
+
161
+ return embeddings
162
+
163
+
164
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
165
+ class IdeficsVisionAttention(nn.Module):
166
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
167
+
168
+ def __init__(self, config):
169
+ super().__init__()
170
+ self.config = config
171
+ self.embed_dim = config.hidden_size
172
+ self.num_heads = config.num_attention_heads
173
+ self.head_dim = self.embed_dim // self.num_heads
174
+ if self.head_dim * self.num_heads != self.embed_dim:
175
+ raise ValueError(
176
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
177
+ f" {self.num_heads})."
178
+ )
179
+ self.scale = self.head_dim**-0.5
180
+ self.dropout = config.attention_dropout
181
+
182
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
183
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
184
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
185
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
186
+
187
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
188
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
189
+
190
+ def forward(
191
+ self,
192
+ hidden_states: torch.Tensor,
193
+ attention_mask: Optional[torch.Tensor] = None,
194
+ causal_attention_mask: Optional[torch.Tensor] = None,
195
+ output_attentions: Optional[bool] = False,
196
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
197
+ """Input shape: Batch x Time x Channel"""
198
+
199
+ bsz, tgt_len, embed_dim = hidden_states.size()
200
+
201
+ # get query proj
202
+ query_states = self.q_proj(hidden_states) * self.scale
203
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
204
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
205
+
206
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
207
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
208
+ key_states = key_states.view(*proj_shape)
209
+ value_states = value_states.view(*proj_shape)
210
+
211
+ src_len = key_states.size(1)
212
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
213
+
214
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
215
+ raise ValueError(
216
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
217
+ f" {attn_weights.size()}"
218
+ )
219
+
220
+ # apply the causal_attention_mask first
221
+ if causal_attention_mask is not None:
222
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
223
+ raise ValueError(
224
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
225
+ f" {causal_attention_mask.size()}"
226
+ )
227
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
228
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
229
+
230
+ if attention_mask is not None:
231
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
232
+ raise ValueError(
233
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
234
+ )
235
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
236
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
237
+
238
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
239
+
240
+ if output_attentions:
241
+ # this operation is a bit awkward, but it's required to
242
+ # make sure that attn_weights keeps its gradient.
243
+ # In order to do so, attn_weights have to be reshaped
244
+ # twice and have to be reused in the following
245
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
246
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
247
+ else:
248
+ attn_weights_reshaped = None
249
+
250
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
251
+
252
+ attn_output = torch.bmm(attn_probs, value_states)
253
+
254
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
255
+ raise ValueError(
256
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
257
+ f" {attn_output.size()}"
258
+ )
259
+
260
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
261
+ attn_output = attn_output.transpose(1, 2)
262
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
263
+
264
+ attn_output = self.out_proj(attn_output)
265
+
266
+ return attn_output, attn_weights_reshaped
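
As a quick sanity check on the shape bookkeeping in the attention forward above (batch and heads are flattened for `torch.bmm`, then the heads are folded back into the embedding dimension), here is a minimal, self-contained sketch with made-up sizes. It is not part of the library; it just mirrors the same tensor manipulations.

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8  # arbitrary demo sizes

# (bsz * num_heads, seq, head_dim), as produced by `_shape(...).view(*proj_shape)`
q = torch.randn(bsz * num_heads, tgt_len, head_dim)
k = torch.randn(bsz * num_heads, tgt_len, head_dim)
v = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn = torch.bmm(q, k.transpose(1, 2)).softmax(dim=-1)  # (bsz*heads, tgt_len, src_len)
out = torch.bmm(attn, v)                                 # (bsz*heads, tgt_len, head_dim)

# fold the heads back: (bsz, tgt_len, embed_dim) with embed_dim = num_heads * head_dim
out = out.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2).reshape(bsz, tgt_len, -1)
print(out.shape)  # torch.Size([2, 5, 32])
```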
267
+
268
+
269
+ # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
270
+ class IdeficsVisionMLP(nn.Module):
271
+ def __init__(self, config):
272
+ super().__init__()
273
+ self.config = config
274
+ self.activation_fn = ACT2FN[config.hidden_act]
275
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
276
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
277
+
278
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
279
+ hidden_states = self.fc1(hidden_states)
280
+ hidden_states = self.activation_fn(hidden_states)
281
+ hidden_states = self.fc2(hidden_states)
282
+ return hidden_states
283
+
284
+
285
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
286
+ class IdeficsVisionEncoderLayer(nn.Module):
287
+ def __init__(self, config: IdeficsVisionConfig):
288
+ super().__init__()
289
+ self.embed_dim = config.hidden_size
290
+ self.self_attn = IdeficsVisionAttention(config)
291
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
292
+ self.mlp = IdeficsVisionMLP(config)
293
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
294
+
295
+ def forward(
296
+ self,
297
+ hidden_states: torch.Tensor,
298
+ attention_mask: torch.Tensor,
299
+ causal_attention_mask: torch.Tensor,
300
+ output_attentions: Optional[bool] = False,
301
+ ) -> Tuple[torch.FloatTensor]:
302
+ """
303
+ Args:
304
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
305
+ attention_mask (`torch.FloatTensor`): attention mask of size
306
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
308
+ output_attentions (`bool`, *optional*):
309
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
310
+ returned tensors for more detail.
311
+ """
312
+ residual = hidden_states
313
+
314
+ hidden_states = self.layer_norm1(hidden_states)
315
+ hidden_states, attn_weights = self.self_attn(
316
+ hidden_states=hidden_states,
317
+ attention_mask=attention_mask,
318
+ causal_attention_mask=causal_attention_mask,
319
+ output_attentions=output_attentions,
320
+ )
321
+ hidden_states = residual + hidden_states
322
+
323
+ residual = hidden_states
324
+ hidden_states = self.layer_norm2(hidden_states)
325
+ hidden_states = self.mlp(hidden_states)
326
+ hidden_states = residual + hidden_states
327
+
328
+ outputs = (hidden_states,)
329
+
330
+ if output_attentions:
331
+ outputs += (attn_weights,)
332
+
333
+ return outputs
334
+
335
+
336
+ # Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
337
+ class IdeficsVisionEncoder(nn.Module):
338
+ """
339
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
340
+ [`IdeficsVisionEncoderLayer`].
341
+
342
+ Args:
343
+ config: IdeficsVisionConfig
344
+ """
345
+
346
+ def __init__(self, config: IdeficsVisionConfig):
347
+ super().__init__()
348
+ self.config = config
349
+ self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
350
+ self.gradient_checkpointing = False
351
+
352
+ def forward(
353
+ self,
354
+ inputs_embeds,
355
+ attention_mask: Optional[torch.Tensor] = None,
356
+ causal_attention_mask: Optional[torch.Tensor] = None,
357
+ output_attentions: Optional[bool] = None,
358
+ output_hidden_states: Optional[bool] = None,
359
+ return_dict: Optional[bool] = None,
360
+ ) -> Union[Tuple, BaseModelOutput]:
361
+ r"""
362
+ Args:
363
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
364
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
365
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
366
+ than the model's internal embedding lookup matrix.
367
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
368
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
369
+
370
+ - 1 for tokens that are **not masked**,
371
+ - 0 for tokens that are **masked**.
372
+
373
+ [What are attention masks?](../glossary#attention-mask)
374
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
375
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
376
+
377
+ - 1 for tokens that are **not masked**,
378
+ - 0 for tokens that are **masked**.
379
+
380
+ [What are attention masks?](../glossary#attention-mask)
381
+ output_attentions (`bool`, *optional*):
382
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
383
+ returned tensors for more detail.
384
+ output_hidden_states (`bool`, *optional*):
385
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
386
+ for more detail.
387
+ return_dict (`bool`, *optional*):
388
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
389
+ """
390
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
391
+ output_hidden_states = (
392
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
393
+ )
394
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
395
+
396
+ encoder_states = () if output_hidden_states else None
397
+ all_attentions = () if output_attentions else None
398
+
399
+ hidden_states = inputs_embeds
400
+ for idx, encoder_layer in enumerate(self.layers):
401
+ if output_hidden_states:
402
+ encoder_states = encoder_states + (hidden_states,)
403
+ if self.gradient_checkpointing and self.training:
404
+ layer_outputs = self._gradient_checkpointing_func(
405
+ encoder_layer.__call__,
406
+ hidden_states,
407
+ attention_mask,
408
+ causal_attention_mask,
409
+ output_attentions,
410
+ )
411
+ else:
412
+ layer_outputs = encoder_layer(
413
+ hidden_states,
414
+ attention_mask,
415
+ causal_attention_mask,
416
+ output_attentions=output_attentions,
417
+ )
418
+
419
+ hidden_states = layer_outputs[0]
420
+
421
+ if output_attentions:
422
+ all_attentions = all_attentions + (layer_outputs[1],)
423
+
424
+ if output_hidden_states:
425
+ encoder_states = encoder_states + (hidden_states,)
426
+
427
+ if not return_dict:
428
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
429
+ return BaseModelOutput(
430
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
431
+ )
432
+
433
+
434
+ # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
435
+ class IdeficsVisionTransformer(nn.Module):
436
+ def __init__(self, config: IdeficsVisionConfig):
437
+ super().__init__()
438
+ self.config = config
439
+ embed_dim = config.hidden_size
440
+
441
+ self.embeddings = IdeficsVisionEmbeddings(config)
442
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
443
+ self.encoder = IdeficsVisionEncoder(config)
444
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
445
+
446
+ # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
447
+ def forward(
448
+ self,
449
+ pixel_values: Optional[torch.FloatTensor] = None,
450
+ output_attentions: Optional[bool] = None,
451
+ output_hidden_states: Optional[bool] = None,
452
+ interpolate_pos_encoding: Optional[bool] = False,
453
+ return_dict: Optional[bool] = None,
454
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
455
+ r"""
456
+ Returns:
457
+
458
+ """
459
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
460
+ output_hidden_states = (
461
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
462
+ )
463
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
464
+
465
+ if pixel_values is None:
466
+ raise ValueError("You have to specify pixel_values")
467
+
468
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
469
+ hidden_states = self.pre_layrnorm(hidden_states)
470
+
471
+ encoder_outputs = self.encoder(
472
+ inputs_embeds=hidden_states,
473
+ output_attentions=output_attentions,
474
+ output_hidden_states=output_hidden_states,
475
+ return_dict=return_dict,
476
+ )
477
+
478
+ last_hidden_state = encoder_outputs[0]
479
+ pooled_output = last_hidden_state[:, 0, :]
480
+ pooled_output = self.post_layernorm(pooled_output)
481
+
482
+ if not return_dict:
483
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
484
+
485
+ return BaseModelOutputWithPooling(
486
+ last_hidden_state=last_hidden_state,
487
+ pooler_output=pooled_output,
488
+ hidden_states=encoder_outputs.hidden_states,
489
+ attentions=encoder_outputs.attentions,
490
+ )
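
To make the resolution trick in `IdeficsVisionEmbeddings.interpolate_pos_encoding` concrete, here is a small standalone sketch of the same idea: bicubically resize the grid of patch position embeddings while keeping the CLS slot untouched. The function name and sizes are illustrative only and are not part of the `transformers` API.

```python
import math

import torch
from torch import nn


def interpolate_position_embedding(pos_embed: torch.Tensor, new_grid: int) -> torch.Tensor:
    """pos_embed: (1, 1 + old_grid**2, dim), index 0 is the CLS position."""
    cls_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]
    dim = pos_embed.shape[-1]
    old_grid = int(math.sqrt(patch_pos.shape[1]))
    # (1, old_grid, old_grid, dim) -> (1, dim, old_grid, old_grid) for interpolate
    patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    patch_pos = nn.functional.interpolate(
        patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False
    )
    # back to (1, new_grid**2, dim) and re-attach the CLS position
    patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
    return torch.cat([cls_pos, patch_pos], dim=1)


# e.g. stretch a 16x16 grid (224 px / patch 14) to 32x32 (448 px input)
resized = interpolate_position_embedding(torch.randn(1, 1 + 16 * 16, 768), new_grid=32)
print(resized.shape)  # torch.Size([1, 1025, 768])
```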
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_instructblip": [
21
+ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "InstructBlipConfig",
23
+ "InstructBlipQFormerConfig",
24
+ "InstructBlipVisionConfig",
25
+ ],
26
+ "processing_instructblip": ["InstructBlipProcessor"],
27
+ }
28
+
29
+ try:
30
+ if not is_torch_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["modeling_instructblip"] = [
36
+ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
37
+ "InstructBlipQFormerModel",
38
+ "InstructBlipPreTrainedModel",
39
+ "InstructBlipForConditionalGeneration",
40
+ "InstructBlipVisionModel",
41
+ ]
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_instructblip import (
45
+ INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
46
+ InstructBlipConfig,
47
+ InstructBlipQFormerConfig,
48
+ InstructBlipVisionConfig,
49
+ )
50
+ from .processing_instructblip import InstructBlipProcessor
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .modeling_instructblip import (
59
+ INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
60
+ InstructBlipForConditionalGeneration,
61
+ InstructBlipPreTrainedModel,
62
+ InstructBlipQFormerModel,
63
+ InstructBlipVisionModel,
64
+ )
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
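
The `_LazyModule` registration above defers the heavy `modeling_instructblip` import until a symbol is actually accessed. A minimal sketch of that idea (not the real `transformers._LazyModule`, whose behaviour is richer) could look like this:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Toy stand-in for transformers' _LazyModule: resolve attributes on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map "ClassName" -> "submodule that defines it"
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item: str):
        if item not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        # the actual import only happens here, on first attribute lookup
        submodule = importlib.import_module(f"{self.__name__}.{self._class_to_module[item]}")
        return getattr(submodule, item)
```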
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/convert_instructblip_original_to_pytorch.cpython-310.pyc ADDED
Binary file (8.35 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc ADDED
Binary file (47.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/configuration_instructblip.py ADDED
@@ -0,0 +1,358 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ InstructBLIP model configuration"""
16
+
17
+ import os
18
+ from typing import Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
22
+ from ...utils import logging
23
+ from ..auto import CONFIG_MAPPING
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
30
+
31
+
32
+ class InstructBlipVisionConfig(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`InstructBlipVisionModel`]. It is used to
35
+ instantiate a InstructBLIP vision encoder according to the specified arguments, defining the model architecture.
36
+ Instantiating a configuration defaults will yield a similar configuration to that of the InstructBLIP
37
+ [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ hidden_size (`int`, *optional*, defaults to 1408):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ intermediate_size (`int`, *optional*, defaults to 6144):
46
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
47
+ num_hidden_layers (`int`, *optional*, defaults to 39):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ image_size (`int`, *optional*, defaults to 224):
52
+ The size (resolution) of each image.
53
+ patch_size (`int`, *optional*, defaults to 14):
54
+ The size (resolution) of each patch.
55
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. to 1e-5): The epsilon used by the layer
58
+ normalization layers.
59
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
60
+ The epsilon used by the layer normalization layers.
61
+ attention_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the attention probabilities.
63
+ initializer_range (`float`, *optional*, defaults to 1e-10):
64
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
65
+ qkv_bias (`bool`, *optional*, defaults to `True`):
66
+ Whether to add a bias to the queries and values in the self-attention layers.
67
+
68
+ Example:
69
+
70
+ ```python
71
+ >>> from transformers import InstructBlipVisionConfig, InstructBlipVisionModel
72
+
73
+ >>> # Initializing a InstructBlipVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
74
+ >>> configuration = InstructBlipVisionConfig()
75
+
76
+ >>> # Initializing a InstructBlipVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
77
+ >>> model = InstructBlipVisionModel(configuration)
78
+
79
+ >>> # Accessing the model configuration
80
+ >>> configuration = model.config
81
+ ```"""
82
+
83
+ model_type = "instructblip_vision_model"
84
+
85
+ def __init__(
86
+ self,
87
+ hidden_size=1408,
88
+ intermediate_size=6144,
89
+ num_hidden_layers=39,
90
+ num_attention_heads=16,
91
+ image_size=224,
92
+ patch_size=14,
93
+ hidden_act="gelu",
94
+ layer_norm_eps=1e-6,
95
+ attention_dropout=0.0,
96
+ initializer_range=1e-10,
97
+ qkv_bias=True,
98
+ **kwargs,
99
+ ):
100
+ super().__init__(**kwargs)
101
+
102
+ self.hidden_size = hidden_size
103
+ self.intermediate_size = intermediate_size
104
+ self.num_hidden_layers = num_hidden_layers
105
+ self.num_attention_heads = num_attention_heads
106
+ self.patch_size = patch_size
107
+ self.image_size = image_size
108
+ self.initializer_range = initializer_range
109
+ self.attention_dropout = attention_dropout
110
+ self.layer_norm_eps = layer_norm_eps
111
+ self.hidden_act = hidden_act
112
+ self.qkv_bias = qkv_bias
113
+
114
+ @classmethod
115
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
116
+ cls._set_token_in_kwargs(kwargs)
117
+
118
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
119
+
120
+ # get the vision config dict if we are loading from InstructBlipConfig
121
+ if config_dict.get("model_type") == "instructblip":
122
+ config_dict = config_dict["vision_config"]
123
+
124
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
125
+ logger.warning(
126
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
127
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
128
+ )
129
+
130
+ return cls.from_dict(config_dict, **kwargs)
131
+
132
+
133
+ class InstructBlipQFormerConfig(PretrainedConfig):
134
+ r"""
135
+ This is the configuration class to store the configuration of a [`InstructBlipQFormerModel`]. It is used to
136
+ instantiate a InstructBLIP Querying Transformer (Q-Former) model according to the specified arguments, defining the
137
+ model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
138
+ the InstructBLIP [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
139
+ architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
140
+ Read the documentation from [`PretrainedConfig`] for more information.
141
+
142
+ Note that [`InstructBlipQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
143
+
144
+ Args:
145
+ vocab_size (`int`, *optional*, defaults to 30522):
146
+ Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
147
+ the `inputs_ids` passed when calling the model.
148
+ hidden_size (`int`, *optional*, defaults to 768):
149
+ Dimensionality of the encoder layers and the pooler layer.
150
+ num_hidden_layers (`int`, *optional*, defaults to 12):
151
+ Number of hidden layers in the Transformer encoder.
152
+ num_attention_heads (`int`, *optional*, defaults to 12):
153
+ Number of attention heads for each attention layer in the Transformer encoder.
154
+ intermediate_size (`int`, *optional*, defaults to 3072):
155
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
156
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
157
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
158
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
159
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
160
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
161
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
162
+ The dropout ratio for the attention probabilities.
163
+ max_position_embeddings (`int`, *optional*, defaults to 512):
164
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
165
+ just in case (e.g., 512 or 1024 or 2048).
166
+ initializer_range (`float`, *optional*, defaults to 0.02):
167
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
168
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
169
+ The epsilon used by the layer normalization layers.
170
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
171
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
172
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
173
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
174
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
175
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
176
+ cross_attention_frequency (`int`, *optional*, defaults to 2):
177
+ The frequency of adding cross-attention to the Transformer layers.
178
+ encoder_hidden_size (`int`, *optional*, defaults to 1408):
179
+ The hidden size of the hidden states for cross-attention.
180
+
181
+ Examples:
182
+
183
+ ```python
184
+ >>> from transformers import InstructBlipQFormerConfig, InstructBlipQFormerModel
185
+
186
+ >>> # Initializing a InstructBLIP Salesforce/instruct-blip-flan-t5 style configuration
187
+ >>> configuration = InstructBlipQFormerConfig()
188
+
189
+ >>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
190
+ >>> model = InstructBlipQFormerModel(configuration)
191
+ >>> # Accessing the model configuration
192
+ >>> configuration = model.config
193
+ ```"""
194
+
195
+ model_type = "instructblip_qformer"
196
+
197
+ def __init__(
198
+ self,
199
+ vocab_size=30522,
200
+ hidden_size=768,
201
+ num_hidden_layers=12,
202
+ num_attention_heads=12,
203
+ intermediate_size=3072,
204
+ hidden_act="gelu",
205
+ hidden_dropout_prob=0.1,
206
+ attention_probs_dropout_prob=0.1,
207
+ max_position_embeddings=512,
208
+ initializer_range=0.02,
209
+ layer_norm_eps=1e-12,
210
+ pad_token_id=0,
211
+ position_embedding_type="absolute",
212
+ cross_attention_frequency=2,
213
+ encoder_hidden_size=1408,
214
+ **kwargs,
215
+ ):
216
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
217
+
218
+ self.vocab_size = vocab_size
219
+ self.hidden_size = hidden_size
220
+ self.num_hidden_layers = num_hidden_layers
221
+ self.num_attention_heads = num_attention_heads
222
+ self.hidden_act = hidden_act
223
+ self.intermediate_size = intermediate_size
224
+ self.hidden_dropout_prob = hidden_dropout_prob
225
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
226
+ self.max_position_embeddings = max_position_embeddings
227
+ self.initializer_range = initializer_range
228
+ self.layer_norm_eps = layer_norm_eps
229
+ self.position_embedding_type = position_embedding_type
230
+ self.cross_attention_frequency = cross_attention_frequency
231
+ self.encoder_hidden_size = encoder_hidden_size
232
+
233
+ @classmethod
234
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
235
+ cls._set_token_in_kwargs(kwargs)
236
+
237
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
238
+
239
+ # get the qformer config dict if we are loading from InstructBlipConfig
240
+ if config_dict.get("model_type") == "instructblip":
241
+ config_dict = config_dict["qformer_config"]
242
+
243
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
244
+ logger.warning(
245
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
246
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
247
+ )
248
+
249
+ return cls.from_dict(config_dict, **kwargs)
250
+
251
+
252
+ class InstructBlipConfig(PretrainedConfig):
253
+ r"""
254
+ [`InstructBlipConfig`] is the configuration class to store the configuration of a
255
+ [`InstructBlipForConditionalGeneration`]. It is used to instantiate an InstructBLIP model according to the specified
256
+ arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
257
+ the defaults will yield a similar configuration to that of the InstructBLIP
258
+ [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
259
+
260
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
261
+ documentation from [`PretrainedConfig`] for more information.
262
+
263
+ Args:
264
+ vision_config (`dict`, *optional*):
265
+ Dictionary of configuration options used to initialize [`InstructBlipVisionConfig`].
266
+ qformer_config (`dict`, *optional*):
267
+ Dictionary of configuration options used to initialize [`InstructBlipQFormerConfig`].
268
+ text_config (`dict`, *optional*):
269
+ Dictionary of configuration options used to initialize any [`PretrainedConfig`].
270
+ num_query_tokens (`int`, *optional*, defaults to 32):
271
+ The number of query tokens passed through the Transformer.
272
+
273
+ kwargs (*optional*):
274
+ Dictionary of keyword arguments.
275
+
276
+ Example:
277
+
278
+ ```python
279
+ >>> from transformers import (
280
+ ... InstructBlipVisionConfig,
281
+ ... InstructBlipQFormerConfig,
282
+ ... OPTConfig,
283
+ ... InstructBlipConfig,
284
+ ... InstructBlipForConditionalGeneration,
285
+ ... )
286
+
287
+ >>> # Initializing a InstructBlipConfig with Salesforce/instruct-blip-flan-t5 style configuration
288
+ >>> configuration = InstructBlipConfig()
289
+
290
+ >>> # Initializing a InstructBlipForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
291
+ >>> model = InstructBlipForConditionalGeneration(configuration)
292
+
293
+ >>> # Accessing the model configuration
294
+ >>> configuration = model.config
295
+
296
+ >>> # We can also initialize a InstructBlipConfig from a InstructBlipVisionConfig, InstructBlipQFormerConfig and any PretrainedConfig
297
+
298
+ >>> # Initializing InstructBLIP vision, InstructBLIP Q-Former and language model configurations
299
+ >>> vision_config = InstructBlipVisionConfig()
300
+ >>> qformer_config = InstructBlipQFormerConfig()
301
+ >>> text_config = OPTConfig()
302
+
303
+ >>> config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
304
+ ```"""
305
+
306
+ model_type = "instructblip"
307
+
308
+ def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
309
+ super().__init__(**kwargs)
310
+
311
+ if vision_config is None:
312
+ vision_config = {}
313
+ logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
314
+
315
+ if qformer_config is None:
316
+ qformer_config = {}
317
+ logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
318
+
319
+ if text_config is None:
320
+ text_config = {}
321
+ logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
322
+
323
+ self.vision_config = InstructBlipVisionConfig(**vision_config)
324
+ self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
325
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
326
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
327
+
328
+ self.tie_word_embeddings = self.text_config.tie_word_embeddings
329
+ self.is_encoder_decoder = self.text_config.is_encoder_decoder
330
+
331
+ self.num_query_tokens = num_query_tokens
332
+ self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
333
+ self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
334
+ self.initializer_factor = 1.0
335
+ self.initializer_range = 0.02
336
+
337
+ @classmethod
338
+ def from_vision_qformer_text_configs(
339
+ cls,
340
+ vision_config: InstructBlipVisionConfig,
341
+ qformer_config: InstructBlipQFormerConfig,
342
+ text_config: PretrainedConfig,
343
+ **kwargs,
344
+ ):
345
+ r"""
346
+ Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and
347
+ language model configurations.
348
+
349
+ Returns:
350
+ [`InstructBlipConfig`]: An instance of a configuration object
351
+ """
352
+
353
+ return cls(
354
+ vision_config=vision_config.to_dict(),
355
+ qformer_config=qformer_config.to_dict(),
356
+ text_config=text_config.to_dict(),
357
+ **kwargs,
358
+ )
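
A hedged usage sketch (it assumes a `transformers` build that ships InstructBLIP): compose the three sub-configs and observe that `InstructBlipConfig.__init__` ties the Q-Former's `encoder_hidden_size` to the vision tower's `hidden_size`, as done in the file above.

```python
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision_config = InstructBlipVisionConfig(hidden_size=1408)
qformer_config = InstructBlipQFormerConfig(encoder_hidden_size=999)  # overridden below
text_config = OPTConfig()

config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
print(config.qformer_config.encoder_hidden_size)  # 1408, copied from vision_config.hidden_size
print(config.use_decoder_only_language_model)     # True, since OPT is a causal LM
```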
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py ADDED
@@ -0,0 +1,303 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Convert InstructBLIP checkpoints from the original repository.
17
+
18
+ URL: https://github.com/salesforce/LAVIS/tree/main/projects/instructblip
19
+ """
20
+
21
+ import argparse
22
+
23
+ import requests
24
+ import torch
25
+
26
+ # pip3 install salesforce-lavis
27
+ # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
28
+ # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
29
+ # same for Vicuna-13b
30
+ from lavis.models import load_model_and_preprocess
31
+ from PIL import Image
32
+
33
+ from transformers import (
34
+ AutoTokenizer,
35
+ BlipImageProcessor,
36
+ InstructBlipConfig,
37
+ InstructBlipForConditionalGeneration,
38
+ InstructBlipProcessor,
39
+ InstructBlipQFormerConfig,
40
+ InstructBlipVisionConfig,
41
+ LlamaConfig,
42
+ LlamaTokenizerFast,
43
+ T5Config,
44
+ T5TokenizerFast,
45
+ )
46
+ from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
47
+
48
+
49
+ def load_demo_image():
50
+ url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
51
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
52
+
53
+ return image
54
+
55
+
56
+ # here we list all keys to be renamed (original name on the left, our name on the right)
57
+ def create_rename_keys(config):
58
+ rename_keys = []
59
+ # fmt: off
60
+
61
+ # vision encoder
62
+ rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
63
+ rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
64
+ rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
65
+ rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
66
+ rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
67
+ rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
68
+
69
+ for i in range(config.vision_config.num_hidden_layers):
70
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
71
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
72
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
73
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
74
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
75
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
76
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
77
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
78
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
79
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
80
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
81
+
82
+ # QFormer
83
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
84
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
85
+
86
+ # fmt: on
87
+ return rename_keys
88
+
89
+
90
+ def rename_key(dct, old, new):
91
+ val = dct.pop(old)
92
+ dct[new] = val
93
+
94
+
95
+ def read_in_q_v_bias(state_dict, config):
96
+ for i in range(config.vision_config.num_hidden_layers):
97
+ # read in original q and v biases
98
+ q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
99
+ v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
100
+
101
+ # next, set bias in the state dict
102
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
103
+ state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
104
+
105
+
106
+ def get_blip2_config(model_name):
107
+ image_size = 364 if "coco" in model_name else 224
108
+ vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
109
+
110
+ # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
111
+ # seems like flan-T5 models don't have bos_token_id properly set?
112
+ if "t5-xl" in model_name:
113
+ text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
114
+ elif "t5-xxl" in model_name:
115
+ text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
116
+ elif "vicuna-7b" in model_name:
117
+ text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
118
+ elif "vicuna-13b" in model_name:
119
+ text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
120
+ else:
121
+ raise ValueError("Model name not supported")
122
+
123
+ # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
124
+ qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
125
+ config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
126
+
127
+ return config, image_size
128
+
129
+
130
+ @torch.no_grad()
131
+ def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
132
+ """
133
+ Copy/paste/tweak model's weights to Transformers design.
134
+ """
135
+ qformer_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", truncation_side="left")
136
+ qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
137
+
138
+ if "t5" in model_name:
139
+ tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
140
+ elif "vicuna" in model_name:
141
+ # the following was used in the original implementation:
142
+ # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
143
+ # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
144
+ # tokenizer.add_special_tokens({"bos_token": "</s>"})
145
+ # tokenizer.add_special_tokens({"eos_token": "</s>"})
146
+ # tokenizer.add_special_tokens({"unk_token": "</s>"})
147
+ tokenizer = LlamaTokenizerFast.from_pretrained(
148
+ "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
149
+ )
150
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
151
+
152
+ config, image_size = get_blip2_config(model_name)
153
+ hf_model = InstructBlipForConditionalGeneration(config).eval()
154
+
155
+ model_name_to_original = {
156
+ "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
157
+ "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
158
+ "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
159
+ "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
160
+ }
161
+
162
+ name, type = model_name_to_original[model_name]
163
+
164
+ # load original model
165
+ print("Loading original model...")
166
+ hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
167
+ lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
168
+ original_model, vis_processors, _ = load_model_and_preprocess(
169
+ name=name, model_type=type, is_eval=True, device=lavis_device
170
+ )
171
+ original_model.eval()
172
+ print("Done!")
173
+
174
+ # update state dict keys
175
+ state_dict = original_model.state_dict()
176
+ rename_keys = create_rename_keys(config)
177
+ for src, dest in rename_keys:
178
+ rename_key(state_dict, src, dest)
179
+
180
+ # some keys can be renamed efficiently
181
+ for key, val in state_dict.copy().items():
182
+ val = state_dict.pop(key)
183
+ if key.startswith("Qformer.bert"):
184
+ key = key.replace("Qformer.bert", "qformer")
185
+ if "attention.self" in key:
186
+ key = key.replace("self", "attention")
187
+ if "llm_proj" in key:
188
+ key = key.replace("llm_proj", "language_projection")
189
+ if "t5_proj" in key:
190
+ key = key.replace("t5_proj", "language_projection")
191
+ if key.startswith("llm_model"):
192
+ key = key.replace("llm_model", "language_model")
193
+ if key.startswith("t5"):
194
+ key = key.replace("t5", "language")
195
+ state_dict[key] = val
196
+
197
+ # read in qv biases
198
+ read_in_q_v_bias(state_dict, config)
199
+
200
+ # note: weights get loaded in torch.float32 by default
201
+ hf_model.load_state_dict(state_dict, strict=True)
202
+
203
+ image = load_demo_image()
204
+ prompt = "What is unusual about this image?"
205
+
206
+ # create processor
207
+ image_processor = BlipImageProcessor(
208
+ size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
209
+ )
210
+ processor = InstructBlipProcessor(
211
+ image_processor=image_processor,
212
+ tokenizer=tokenizer,
213
+ qformer_tokenizer=qformer_tokenizer,
214
+ )
215
+ inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
216
+
217
+ # make sure processor creates exact same pixel values
218
+ original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
219
+ pixel_values = inputs.pixel_values
220
+ assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
221
+
222
+ original_model.to(lavis_device)
223
+ hf_model.to(hf_model_device)
224
+ with torch.no_grad():
225
+ if "vicuna" in model_name:
226
+ original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
227
+ logits = hf_model(**inputs).logits
228
+ else:
229
+ original_logits = original_model(
230
+ {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
231
+ ).logits
232
+ label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
233
+ labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
234
+ logits = hf_model(**inputs, labels=labels).logits
235
+
236
+ print("First values of original logits:", original_logits[0, :3, :3])
237
+ print("First values of HF logits:", logits[0, :3, :3])
238
+
239
+ # assert values
240
+ assert original_logits.shape == logits.shape
241
+ atol = 1e-4 if "vicuna" in model_name else 1e-5
242
+ assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
243
+ print("Looks ok!")
244
+
245
+ print("Generating with original model...")
246
+ original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
247
+
248
+ # important: we need to cast the weights of the HF model to the appropriate type
249
+ print("Generating with HF model...")
250
+ outputs = hf_model.generate(
251
+ **inputs,
252
+ do_sample=False,
253
+ num_beams=5,
254
+ max_length=256,
255
+ min_length=1,
256
+ top_p=0.9,
257
+ repetition_penalty=1.5,
258
+ length_penalty=1.0,
259
+ temperature=1,
260
+ )
261
+ if "vicuna" in model_name:
262
+ # convert output id 0 to 2 (eos_token_id)
263
+ # TODO add this in the generate method?
264
+ outputs[outputs == 0] = 2
265
+ print("Original generation:", original_outputs)
266
+ output_text = processor.batch_decode(outputs, skip_special_tokens=True)
267
+ output_text = [text.strip() for text in output_text]
268
+ print("HF generation:", output_text)
269
+
270
+ if pytorch_dump_folder_path is not None:
271
+ processor.save_pretrained(pytorch_dump_folder_path)
272
+ hf_model.save_pretrained(pytorch_dump_folder_path)
273
+
274
+ if push_to_hub:
275
+ processor.push_to_hub(f"Salesforce/{model_name}")
276
+ hf_model.push_to_hub(f"Salesforce/{model_name}")
277
+
278
+
279
+ if __name__ == "__main__":
280
+ parser = argparse.ArgumentParser()
281
+ choices = [
282
+ "instructblip-vicuna-7b",
283
+ "instructblip-vicuna-13b",
284
+ "instructblip-flan-t5-xl",
285
+ "instructblip-flan-t5-xxl",
286
+ ]
287
+ parser.add_argument(
288
+ "--model_name",
289
+ default="instructblip-flan-t5-xl",
290
+ choices=choices,
291
+ type=str,
292
+ help="Path to hf config.json of model to convert",
293
+ )
294
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
295
+ parser.add_argument(
296
+ "--push_to_hub",
297
+ action="store_true",
298
+ help="Whether to push the model and processor to the hub after converting",
299
+ )
300
+
301
+ args = parser.parse_args()
302
+
303
+ convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
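
Since most of the conversion work in `convert_blip2_checkpoint` is mechanical key renaming, here is a tiny, self-contained illustration of that pattern with made-up keys; it does not load any real checkpoint.

```python
import torch

# toy "original" state dict
state_dict = {
    "visual_encoder.cls_token": torch.zeros(1, 1, 4),
    "Qformer.bert.embeddings.LayerNorm.weight": torch.ones(4),
}

# (old name, new name) pairs, in the spirit of create_rename_keys()
rename_keys = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"),
]

for old, new in rename_keys:
    state_dict[new] = state_dict.pop(old)  # same behaviour as rename_key(dct, old, new)

print(sorted(state_dict))
# ['qformer.embeddings.layernorm.weight', 'vision_model.embeddings.class_embedding']
```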
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py ADDED
@@ -0,0 +1,1567 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch InstructBLIP model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPooling,
31
+ BaseModelOutputWithPoolingAndCrossAttentions,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ ModelOutput,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ )
42
+ from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
43
+ from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ _CHECKPOINT_FOR_DOC = "Salesforce/instructblip-flan-t5-xl"
49
+
50
+
51
+ from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
52
+
53
+
54
+ @dataclass
55
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput with Blip2->InstructBlip
56
+ class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
57
+ """
58
+ Class defining the outputs of [`InstructBlipForConditionalGeneration`].
59
+
60
+ Args:
61
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
62
+ Language modeling loss from the language model.
63
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
64
+ Prediction scores of the language modeling head of the language model.
65
+ vision_outputs (`BaseModelOutputWithPooling`):
66
+ Outputs of the vision encoder.
67
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
68
+ Outputs of the Q-Former (Querying Transformer).
69
+ language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
70
+ Outputs of the language model.
71
+ """
72
+
73
+ loss: Optional[Tuple[torch.FloatTensor]] = None
74
+ logits: Optional[Tuple[torch.FloatTensor]] = None
75
+ vision_outputs: Optional[torch.FloatTensor] = None
76
+ qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
77
+ language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
78
+
79
+ def to_tuple(self) -> Tuple[Any]:
80
+ return tuple(
81
+ self[k]
82
+ if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
83
+ else getattr(self, k).to_tuple()
84
+ for k in self.keys()
85
+ )
86
+
87
+
88
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->InstructBlip
89
+ class InstructBlipVisionEmbeddings(nn.Module):
90
+ def __init__(self, config: InstructBlipVisionConfig):
91
+ super().__init__()
92
+ self.config = config
93
+ self.embed_dim = config.hidden_size
94
+ self.image_size = config.image_size
95
+ self.patch_size = config.patch_size
96
+
97
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
98
+
99
+ self.patch_embedding = nn.Conv2d(
100
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
101
+ )
102
+
103
+ self.num_patches = (self.image_size // self.patch_size) ** 2
104
+ self.num_positions = self.num_patches + 1
105
+
106
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
107
+
108
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
109
+ batch_size = pixel_values.shape[0]
110
+ target_dtype = self.patch_embedding.weight.dtype
111
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
112
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
113
+
114
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
115
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
116
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
117
+ return embeddings
118
+
119
+
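+ # --- Illustrative sketch (not part of the original file) ---
+ # Shape walk-through of the embedding module above, assuming hypothetical values
+ # image_size=224, patch_size=14, hidden_size=1408 (any consistent config works):
+ #
+ #     config = InstructBlipVisionConfig(image_size=224, patch_size=14, hidden_size=1408)
+ #     embeddings = InstructBlipVisionEmbeddings(config)
+ #     pixel_values = torch.randn(2, 3, 224, 224)
+ #     out = embeddings(pixel_values)
+ #     # num_patches = (224 // 14) ** 2 = 256, plus one [CLS]-style class token
+ #     assert out.shape == (2, 257, 1408)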
120
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2Attention with Blip2->InstructBlip
121
+ class InstructBlipAttention(nn.Module):
122
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
123
+
124
+ def __init__(self, config):
125
+ super().__init__()
126
+ self.config = config
127
+ self.embed_dim = config.hidden_size
128
+ self.num_heads = config.num_attention_heads
129
+ self.head_dim = self.embed_dim // self.num_heads
130
+ if self.head_dim * self.num_heads != self.embed_dim:
131
+ raise ValueError(
132
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
133
+ f" {self.num_heads})."
134
+ )
135
+ self.scale = self.head_dim**-0.5
136
+ self.dropout = nn.Dropout(config.attention_dropout)
137
+
138
+ # small tweak compared to CLIP: no bias on the packed qkv projection here (q/v biases are added below if qkv_bias is set)
139
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
140
+
141
+ if config.qkv_bias:
142
+ q_bias = nn.Parameter(torch.zeros(self.embed_dim))
143
+ v_bias = nn.Parameter(torch.zeros(self.embed_dim))
144
+ else:
145
+ q_bias = None
146
+ v_bias = None
147
+
148
+ if q_bias is not None:
149
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
150
+ self.qkv.bias = nn.Parameter(qkv_bias)
151
+
152
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
153
+
154
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
155
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
156
+
157
+ def forward(
158
+ self,
159
+ hidden_states: torch.Tensor,
160
+ head_mask: Optional[torch.Tensor] = None,
161
+ output_attentions: Optional[bool] = False,
162
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
163
+ """Input shape: Batch x Time x Channel"""
164
+
165
+ bsz, tgt_len, embed_dim = hidden_states.size()
166
+
167
+ mixed_qkv = self.qkv(hidden_states)
168
+
169
+ mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
170
+ 2, 0, 3, 1, 4
171
+ )
172
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
173
+
174
+ # Take the dot product between "query" and "key" to get the raw attention scores.
175
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
176
+
177
+ attention_scores = attention_scores * self.scale
178
+
179
+ # Normalize the attention scores to probabilities.
180
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
181
+
182
+ # This is actually dropping out entire tokens to attend to, which might
183
+ # seem a bit unusual, but is taken from the original Transformer paper.
184
+ attention_probs = self.dropout(attention_probs)
185
+
186
+ # Mask heads if we want to
187
+ if head_mask is not None:
188
+ attention_probs = attention_probs * head_mask
189
+
190
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
191
+
192
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
193
+ context_layer = context_layer.reshape(new_context_layer_shape)
194
+
195
+ output = self.projection(context_layer)
196
+
197
+ outputs = (output, attention_probs) if output_attentions else (output, None)
198
+
199
+ return outputs
200
+
201
+
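+ # --- Illustrative sketch (not part of the original file) ---
+ # The attention module above packs Q, K and V into a single `qkv` projection and then splits
+ # them; a simplified, standalone version of that split with hypothetical sizes:
+ #
+ #     bsz, seq_len, embed_dim, num_heads = 2, 257, 1408, 16
+ #     head_dim = embed_dim // num_heads
+ #     qkv = nn.Linear(embed_dim, 3 * embed_dim, bias=False)
+ #     hidden_states = torch.randn(bsz, seq_len, embed_dim)
+ #     mixed = qkv(hidden_states).reshape(bsz, seq_len, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
+ #     q, k, v = mixed[0], mixed[1], mixed[2]          # each: (bsz, num_heads, seq_len, head_dim)
+ #     attn = torch.softmax(q @ k.transpose(-1, -2) * head_dim**-0.5, dim=-1)
+ #     context = (attn @ v).permute(0, 2, 1, 3).reshape(bsz, seq_len, embed_dim)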
202
+ # Copied from transformers.models.blip.modeling_blip.BlipMLP
203
+ class InstructBlipMLP(nn.Module):
204
+ def __init__(self, config):
205
+ super().__init__()
206
+ self.config = config
207
+ self.activation_fn = ACT2FN[config.hidden_act]
208
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
209
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
210
+
211
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
212
+ hidden_states = self.fc1(hidden_states)
213
+ hidden_states = self.activation_fn(hidden_states)
214
+ hidden_states = self.fc2(hidden_states)
215
+ return hidden_states
216
+
217
+
218
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->InstructBlip
219
+ class InstructBlipEncoderLayer(nn.Module):
220
+ def __init__(self, config: InstructBlipConfig):
221
+ super().__init__()
222
+ self.embed_dim = config.hidden_size
223
+ self.self_attn = InstructBlipAttention(config)
224
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
225
+ self.mlp = InstructBlipMLP(config)
226
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
227
+
228
+ def forward(
229
+ self,
230
+ hidden_states: torch.Tensor,
231
+ attention_mask: torch.Tensor,
232
+ output_attentions: Optional[bool] = False,
233
+ ) -> Tuple[torch.FloatTensor]:
234
+ """
235
+ Args:
236
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
237
+ attention_mask (`torch.FloatTensor`): attention mask of size
238
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
240
+ output_attentions (`bool`, *optional*):
241
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
242
+ returned tensors for more detail.
243
+ """
244
+ residual = hidden_states
245
+
246
+ hidden_states = self.layer_norm1(hidden_states)
247
+ hidden_states, attn_weights = self.self_attn(
248
+ hidden_states=hidden_states,
249
+ head_mask=attention_mask,
250
+ output_attentions=output_attentions,
251
+ )
252
+ hidden_states = hidden_states + residual
253
+ residual = hidden_states
254
+ hidden_states = self.layer_norm2(hidden_states)
255
+ hidden_states = self.mlp(hidden_states)
256
+
257
+ hidden_states = hidden_states + residual
258
+
259
+ outputs = (hidden_states,)
260
+
261
+ if output_attentions:
262
+ outputs += (attn_weights,)
263
+
264
+ return outputs
265
+
266
+
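+ # --- Illustrative sketch (not part of the original file) ---
+ # The encoder layer above uses a pre-LayerNorm residual layout; in pseudocode the ordering
+ # is (this mirrors the forward method above, it is not an alternative implementation):
+ #
+ #     x = x + self_attn(layer_norm1(x))
+ #     x = x + mlp(layer_norm2(x))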
267
+ class InstructBlipPreTrainedModel(PreTrainedModel):
268
+ """
269
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
270
+ models.
271
+ """
272
+
273
+ config_class = InstructBlipConfig
274
+ base_model_prefix = "blip"
275
+ supports_gradient_checkpointing = True
276
+ _no_split_modules = [
277
+ "InstructBlipQFormerEmbeddings",
278
+ "InstructBlipAttention",
279
+ "InstructBlipQFormerMultiHeadAttention",
280
+ "InstructBlipQFormerSelfOutput",
281
+ ]
282
+ _keep_in_fp32_modules = []
283
+
284
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2PreTrainedModel._init_weights with Blip2->InstructBlip
285
+ def _init_weights(self, module):
286
+ """Initialize the weights"""
287
+ factor = self.config.initializer_range
288
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
289
+ module.weight.data.normal_(mean=0.0, std=factor)
290
+ if hasattr(module, "bias") and module.bias is not None:
291
+ module.bias.data.zero_()
292
+
293
+ if isinstance(module, InstructBlipVisionEmbeddings):
294
+ if hasattr(self.config, "vision_config"):
295
+ factor = self.config.vision_config.initializer_range
296
+ nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
297
+ nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
298
+
299
+ elif isinstance(module, nn.LayerNorm):
300
+ module.bias.data.zero_()
301
+ module.weight.data.fill_(1.0)
302
+ elif isinstance(module, nn.Linear) and module.bias is not None:
303
+ module.bias.data.zero_()
304
+
305
+
306
+ INSTRUCTBLIP_START_DOCSTRING = r"""
307
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
308
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
309
+ etc.).
310
+
311
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
312
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
313
+ and behavior.
314
+
315
+ Parameters:
316
+ config ([`InstructBlipConfig`]): Model configuration class with all the parameters of the model.
317
+ Initializing with a config file does not load the weights associated with the model, only the
318
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
319
+ """
320
+
321
+ INSTRUCTBLIP_VISION_INPUTS_DOCSTRING = r"""
322
+ Args:
323
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
324
+ Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
325
+ [`InstructBlipProcessor.__call__`] for details.
326
+ output_attentions (`bool`, *optional*):
327
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
328
+ tensors for more detail.
329
+ output_hidden_states (`bool`, *optional*):
330
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
331
+ more detail.
332
+ return_dict (`bool`, *optional*):
333
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
334
+ """
335
+
336
+ INSTRUCTBLIP_INPUTS_DOCSTRING = r"""
337
+ Args:
338
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
339
+ Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
340
+ [`InstructBlipProcessor.__call__`] for details.
341
+
342
+ qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
343
+ Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
344
+ to serve as text prompt, which the Q-Former model will encode.
345
+
346
+ Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
347
+ details.
348
+
349
+ [What are input IDs?](../glossary#input-ids)
350
+
351
+ qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
352
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
353
+
354
+ - 1 for tokens that are **not masked**,
355
+ - 0 for tokens that are **masked**.
356
+
357
+ [What are attention masks?](../glossary#attention-mask)
358
+
359
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
360
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
361
+ provided to serve as text prompt, which the language model can continue.
362
+
363
+ Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
364
+ details.
365
+
366
+ [What are input IDs?](../glossary#input-ids)
367
+
368
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
369
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
370
+
371
+ - 1 for tokens that are **not masked**,
372
+ - 0 for tokens that are **masked**.
373
+
374
+ [What are attention masks?](../glossary#attention-mask)
375
+
376
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
377
+ Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
378
+ encoder-decoder language model (like T5) is used.
379
+
380
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
381
+ [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
382
+
383
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
384
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
385
+ be used by default.
386
+
387
+ Only relevant in case an encoder-decoder language model (like T5) is used.
388
+
389
+ output_attentions (`bool`, *optional*):
390
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
391
+ tensors for more detail.
392
+ output_hidden_states (`bool`, *optional*):
393
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
394
+ more detail.
395
+ return_dict (`bool`, *optional*):
396
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
397
+ """
398
+
399
+
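+ # --- Illustrative sketch (not part of the original file) ---
+ # A hedged example of how the inputs documented above are usually produced with
+ # `InstructBlipProcessor` (the prompt is a placeholder; `image` is a PIL image created elsewhere):
+ #
+ #     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
+ #     inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
+ #     # `inputs` then contains `pixel_values`, `input_ids`/`attention_mask` for the language model
+ #     # and `qformer_input_ids`/`qformer_attention_mask` for the Q-Former.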
400
+ # Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->InstructBlip
401
+ class InstructBlipEncoder(nn.Module):
402
+ """
403
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
404
+ [`InstructBlipEncoderLayer`].
405
+
406
+ Args:
407
+ config (`InstructBlipConfig`):
408
+ The corresponding vision configuration for the `InstructBlipEncoder`.
409
+ """
410
+
411
+ def __init__(self, config: InstructBlipConfig):
412
+ super().__init__()
413
+ self.config = config
414
+ self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
415
+ self.gradient_checkpointing = False
416
+
417
+ def forward(
418
+ self,
419
+ inputs_embeds,
420
+ attention_mask: Optional[torch.Tensor] = None,
421
+ output_attentions: Optional[bool] = None,
422
+ output_hidden_states: Optional[bool] = None,
423
+ return_dict: Optional[bool] = None,
424
+ ) -> Union[Tuple, BaseModelOutput]:
425
+ r"""
426
+ Args:
427
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
428
+ Embedded representation of the inputs. Should be floating-point embeddings, not integer token ids.
429
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
430
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
431
+
432
+ - 1 for tokens that are **not masked**,
433
+ - 0 for tokens that are **masked**.
434
+
435
+ [What are attention masks?](../glossary#attention-mask)
436
+ output_attentions (`bool`, *optional*):
437
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
438
+ returned tensors for more detail.
439
+ output_hidden_states (`bool`, *optional*):
440
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
441
+ for more detail.
442
+ return_dict (`bool`, *optional*):
443
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
444
+ """
445
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
446
+ output_hidden_states = (
447
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
448
+ )
449
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
450
+
451
+ encoder_states = () if output_hidden_states else None
452
+ all_attentions = () if output_attentions else None
453
+
454
+ hidden_states = inputs_embeds
455
+ for idx, encoder_layer in enumerate(self.layers):
456
+ if output_hidden_states:
457
+ encoder_states = encoder_states + (hidden_states,)
458
+ if self.gradient_checkpointing and self.training:
459
+ layer_outputs = self._gradient_checkpointing_func(
460
+ encoder_layer.__call__,
461
+ hidden_states,
462
+ attention_mask,
463
+ output_attentions,
464
+ )
465
+ else:
466
+ layer_outputs = encoder_layer(
467
+ hidden_states,
468
+ attention_mask,
469
+ output_attentions=output_attentions,
470
+ )
471
+
472
+ hidden_states = layer_outputs[0]
473
+
474
+ if output_attentions:
475
+ all_attentions = all_attentions + (layer_outputs[1],)
476
+
477
+ if output_hidden_states:
478
+ encoder_states = encoder_states + (hidden_states,)
479
+
480
+ if not return_dict:
481
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
482
+ return BaseModelOutput(
483
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
484
+ )
485
+
486
+
487
+ # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP
488
+ class InstructBlipVisionModel(InstructBlipPreTrainedModel):
489
+ main_input_name = "pixel_values"
490
+ config_class = InstructBlipVisionConfig
491
+
492
+ def __init__(self, config: InstructBlipVisionConfig):
493
+ super().__init__(config)
494
+ self.config = config
495
+ embed_dim = config.hidden_size
496
+
497
+ self.embeddings = InstructBlipVisionEmbeddings(config)
498
+ self.encoder = InstructBlipEncoder(config)
499
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
500
+
501
+ self.post_init()
502
+
503
+ @add_start_docstrings_to_model_forward(INSTRUCTBLIP_VISION_INPUTS_DOCSTRING)
504
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVisionConfig)
505
+ def forward(
506
+ self,
507
+ pixel_values: Optional[torch.FloatTensor] = None,
508
+ output_attentions: Optional[bool] = None,
509
+ output_hidden_states: Optional[bool] = None,
510
+ return_dict: Optional[bool] = None,
511
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
512
+ r"""
513
+ Returns:
514
+
515
+ """
516
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
517
+ output_hidden_states = (
518
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
519
+ )
520
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
521
+
522
+ if pixel_values is None:
523
+ raise ValueError("You have to specify pixel_values")
524
+
525
+ hidden_states = self.embeddings(pixel_values)
526
+
527
+ encoder_outputs = self.encoder(
528
+ inputs_embeds=hidden_states,
529
+ output_attentions=output_attentions,
530
+ output_hidden_states=output_hidden_states,
531
+ return_dict=return_dict,
532
+ )
533
+
534
+ last_hidden_state = encoder_outputs[0]
535
+ last_hidden_state = self.post_layernorm(last_hidden_state)
536
+
537
+ pooled_output = last_hidden_state[:, 0, :]
538
+ pooled_output = self.post_layernorm(pooled_output)
539
+
540
+ if not return_dict:
541
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
542
+
543
+ return BaseModelOutputWithPooling(
544
+ last_hidden_state=last_hidden_state,
545
+ pooler_output=pooled_output,
546
+ hidden_states=encoder_outputs.hidden_states,
547
+ attentions=encoder_outputs.attentions,
548
+ )
549
+
550
+ def get_input_embeddings(self):
551
+ return self.embeddings
552
+
553
+
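+ # --- Illustrative sketch (not part of the original file) ---
+ # Minimal, randomly initialized usage of the vision tower above (config values are
+ # hypothetical; no pretrained weights are loaded here):
+ #
+ #     vision_config = InstructBlipVisionConfig(image_size=224, patch_size=14, hidden_size=1408)
+ #     vision_model = InstructBlipVisionModel(vision_config)
+ #     pixel_values = torch.randn(1, 3, 224, 224)
+ #     out = vision_model(pixel_values=pixel_values)
+ #     out.last_hidden_state.shape                     # (1, 257, 1408)
+ #     out.pooler_output.shape                         # (1, 1408) -- layer-normed [CLS] token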
554
+ class InstructBlipQFormerMultiHeadAttention(nn.Module):
555
+ def __init__(self, config, is_cross_attention=False):
556
+ super().__init__()
557
+ self.config = config
558
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
559
+ raise ValueError(
560
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
561
+ % (config.hidden_size, config.num_attention_heads)
562
+ )
563
+
564
+ self.num_attention_heads = config.num_attention_heads
565
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
566
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
567
+
568
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
569
+ if is_cross_attention:
570
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
571
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
572
+ else:
573
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
574
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
575
+
576
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
577
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
578
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
579
+ self.max_position_embeddings = config.max_position_embeddings
580
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
581
+ self.save_attention = False
582
+
583
+ def save_attn_gradients(self, attn_gradients):
584
+ self.attn_gradients = attn_gradients
585
+
586
+ def get_attn_gradients(self):
587
+ return self.attn_gradients
588
+
589
+ def save_attention_map(self, attention_map):
590
+ self.attention_map = attention_map
591
+
592
+ def get_attention_map(self):
593
+ return self.attention_map
594
+
595
+ def transpose_for_scores(self, x):
596
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
597
+ x = x.view(*new_x_shape)
598
+ return x.permute(0, 2, 1, 3)
599
+
600
+ def forward(
601
+ self,
602
+ hidden_states,
603
+ attention_mask=None,
604
+ head_mask=None,
605
+ encoder_hidden_states=None,
606
+ encoder_attention_mask=None,
607
+ past_key_value=None,
608
+ output_attentions=False,
609
+ ):
610
+ # If this is instantiated as a cross-attention module, the keys
611
+ # and values come from an encoder; the attention mask needs to be
612
+ # such that the encoder's padding tokens are not attended to.
613
+ is_cross_attention = encoder_hidden_states is not None
614
+
615
+ if is_cross_attention:
616
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
617
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
618
+ attention_mask = encoder_attention_mask
619
+ elif past_key_value is not None:
620
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
621
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
622
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
623
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
624
+ else:
625
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
626
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
627
+
628
+ mixed_query_layer = self.query(hidden_states)
629
+
630
+ query_layer = self.transpose_for_scores(mixed_query_layer)
631
+
632
+ past_key_value = (key_layer, value_layer)
633
+
634
+ # Take the dot product between "query" and "key" to get the raw attention scores.
635
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
636
+
637
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
638
+ seq_length = hidden_states.size()[1]
639
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
640
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
641
+ distance = position_ids_l - position_ids_r
642
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
643
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
644
+
645
+ if self.position_embedding_type == "relative_key":
646
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
647
+ attention_scores = attention_scores + relative_position_scores
648
+ elif self.position_embedding_type == "relative_key_query":
649
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
650
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
651
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
652
+
653
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
654
+ attention_scores_dtype = attention_scores.dtype
655
+
656
+ if attention_mask is not None:
657
+ # Apply the attention mask (precomputed for all layers in the model's forward() function)
658
+ attention_scores = attention_scores + attention_mask
659
+
660
+ # Normalize the attention scores to probabilities.
661
+ attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)
662
+
663
+ if is_cross_attention and self.save_attention:
664
+ self.save_attention_map(attention_probs)
665
+ attention_probs.register_hook(self.save_attn_gradients)
666
+
667
+ # This is actually dropping out entire tokens to attend to, which might
668
+ # seem a bit unusual, but is taken from the original Transformer paper.
669
+ attention_probs_dropped = self.dropout(attention_probs)
670
+
671
+ # Mask heads if we want to
672
+ if head_mask is not None:
673
+ attention_probs_dropped = attention_probs_dropped * head_mask
674
+
675
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
676
+
677
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
678
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
679
+ context_layer = context_layer.view(*new_context_layer_shape)
680
+
681
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
682
+
683
+ outputs = outputs + (past_key_value,)
684
+ return outputs
685
+
686
+
687
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer
688
+ class InstructBlipQFormerSelfOutput(nn.Module):
689
+ def __init__(self, config):
690
+ super().__init__()
691
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
692
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
693
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
694
+
695
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
696
+ hidden_states = self.dense(hidden_states)
697
+ hidden_states = self.dropout(hidden_states)
698
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
699
+ return hidden_states
700
+
701
+
702
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->InstructBlip
703
+ class InstructBlipQFormerAttention(nn.Module):
704
+ def __init__(self, config, is_cross_attention=False):
705
+ super().__init__()
706
+ self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention)
707
+ self.output = InstructBlipQFormerSelfOutput(config)
708
+ self.pruned_heads = set()
709
+
710
+ def prune_heads(self, heads):
711
+ if len(heads) == 0:
712
+ return
713
+ heads, index = find_pruneable_heads_and_indices(
714
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
715
+ )
716
+
717
+ # Prune linear layers
718
+ self.attention.query = prune_linear_layer(self.attention.query, index)
719
+ self.attention.key = prune_linear_layer(self.attention.key, index)
720
+ self.attention.value = prune_linear_layer(self.attention.value, index)
721
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
722
+
723
+ # Update hyper params and store pruned heads
724
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
725
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
726
+ self.pruned_heads = self.pruned_heads.union(heads)
727
+
728
+ def forward(
729
+ self,
730
+ hidden_states: torch.Tensor,
731
+ attention_mask: Optional[torch.FloatTensor] = None,
732
+ head_mask: Optional[torch.FloatTensor] = None,
733
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
734
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
735
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
736
+ output_attentions: Optional[bool] = False,
737
+ ) -> Tuple[torch.Tensor]:
738
+ self_outputs = self.attention(
739
+ hidden_states,
740
+ attention_mask,
741
+ head_mask,
742
+ encoder_hidden_states,
743
+ encoder_attention_mask,
744
+ past_key_value,
745
+ output_attentions,
746
+ )
747
+ attention_output = self.output(self_outputs[0], hidden_states)
748
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
749
+ return outputs
750
+
751
+
752
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->InstructBlipQFormer
753
+ class InstructBlipQFormerIntermediate(nn.Module):
754
+ def __init__(self, config):
755
+ super().__init__()
756
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
757
+ if isinstance(config.hidden_act, str):
758
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
759
+ else:
760
+ self.intermediate_act_fn = config.hidden_act
761
+
762
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
763
+ hidden_states = self.dense(hidden_states)
764
+ hidden_states = self.intermediate_act_fn(hidden_states)
765
+ return hidden_states
766
+
767
+
768
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->InstructBlipQFormer
769
+ class InstructBlipQFormerOutput(nn.Module):
770
+ def __init__(self, config):
771
+ super().__init__()
772
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
773
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
774
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
775
+
776
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
777
+ hidden_states = self.dense(hidden_states)
778
+ hidden_states = self.dropout(hidden_states)
779
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
780
+ return hidden_states
781
+
782
+
783
+ class InstructBlipQFormerLayer(nn.Module):
784
+ def __init__(self, config, layer_idx):
785
+ super().__init__()
786
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
787
+ self.seq_len_dim = 1
788
+ self.attention = InstructBlipQFormerAttention(config)
789
+
790
+ self.layer_idx = layer_idx
791
+
792
+ if layer_idx % config.cross_attention_frequency == 0:
793
+ self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True)
794
+ self.has_cross_attention = True
795
+ else:
796
+ self.has_cross_attention = False
797
+
798
+ self.intermediate = InstructBlipQFormerIntermediate(config)
799
+ self.output = InstructBlipQFormerOutput(config)
800
+
801
+ self.intermediate_query = InstructBlipQFormerIntermediate(config)
802
+ self.output_query = InstructBlipQFormerOutput(config)
803
+
804
+ def forward(
805
+ self,
806
+ hidden_states,
807
+ attention_mask=None,
808
+ head_mask=None,
809
+ encoder_hidden_states=None,
810
+ encoder_attention_mask=None,
811
+ past_key_value=None,
812
+ output_attentions=False,
813
+ query_length=0,
814
+ ):
815
+ # the cached key/value tuple for uni-directional (decoder-style) self-attention is at positions 1, 2
816
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
817
+ self_attention_outputs = self.attention(
818
+ hidden_states,
819
+ attention_mask,
820
+ head_mask,
821
+ output_attentions=output_attentions,
822
+ past_key_value=self_attn_past_key_value,
823
+ )
824
+ attention_output = self_attention_outputs[0]
825
+ outputs = self_attention_outputs[1:-1]
826
+
827
+ present_key_value = self_attention_outputs[-1]
828
+
829
+ if query_length > 0:
830
+ query_attention_output = attention_output[:, :query_length, :]
831
+
832
+ if self.has_cross_attention:
833
+ if encoder_hidden_states is None:
834
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
835
+ cross_attention_outputs = self.crossattention(
836
+ query_attention_output,
837
+ attention_mask,
838
+ head_mask,
839
+ encoder_hidden_states,
840
+ encoder_attention_mask,
841
+ output_attentions=output_attentions,
842
+ )
843
+ query_attention_output = cross_attention_outputs[0]
844
+ # add cross attentions if we output attention weights
845
+ outputs = outputs + cross_attention_outputs[1:-1]
846
+
847
+ layer_output = apply_chunking_to_forward(
848
+ self.feed_forward_chunk_query,
849
+ self.chunk_size_feed_forward,
850
+ self.seq_len_dim,
851
+ query_attention_output,
852
+ )
853
+
854
+ if attention_output.shape[1] > query_length:
855
+ layer_output_text = apply_chunking_to_forward(
856
+ self.feed_forward_chunk,
857
+ self.chunk_size_feed_forward,
858
+ self.seq_len_dim,
859
+ attention_output[:, query_length:, :],
860
+ )
861
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
862
+ else:
863
+ layer_output = apply_chunking_to_forward(
864
+ self.feed_forward_chunk,
865
+ self.chunk_size_feed_forward,
866
+ self.seq_len_dim,
867
+ attention_output,
868
+ )
869
+ outputs = (layer_output,) + outputs
870
+
871
+ outputs = outputs + (present_key_value,)
872
+
873
+ return outputs
874
+
875
+ def feed_forward_chunk(self, attention_output):
876
+ intermediate_output = self.intermediate(attention_output)
877
+ layer_output = self.output(intermediate_output, attention_output)
878
+ return layer_output
879
+
880
+ def feed_forward_chunk_query(self, attention_output):
881
+ intermediate_output = self.intermediate_query(attention_output)
882
+ layer_output = self.output_query(intermediate_output, attention_output)
883
+ return layer_output
884
+
885
+
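+ # --- Illustrative sketch (not part of the original file) ---
+ # In the layer above, the first `query_length` positions are the learned query tokens (which
+ # may cross-attend to image features) and the remaining positions are instruction text tokens;
+ # conceptually the feed-forward step is applied as:
+ #
+ #     queries, text = attention_output[:, :query_length], attention_output[:, query_length:]
+ #     out = torch.cat([ffn_query(queries), ffn_text(text)], dim=1)
+ #
+ # where `ffn_query`/`ffn_text` are shorthand for the `intermediate_query`/`output_query` and
+ # `intermediate`/`output` pairs above (these shorthand names are not real attributes).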
886
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder with Blip2->InstructBlip
887
+ class InstructBlipQFormerEncoder(nn.Module):
888
+ def __init__(self, config):
889
+ super().__init__()
890
+ self.config = config
891
+ self.layer = nn.ModuleList(
892
+ [InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
893
+ )
894
+ self.gradient_checkpointing = False
895
+
896
+ def forward(
897
+ self,
898
+ hidden_states,
899
+ attention_mask=None,
900
+ head_mask=None,
901
+ encoder_hidden_states=None,
902
+ encoder_attention_mask=None,
903
+ past_key_values=None,
904
+ use_cache=None,
905
+ output_attentions=False,
906
+ output_hidden_states=False,
907
+ return_dict=True,
908
+ query_length=0,
909
+ ):
910
+ all_hidden_states = () if output_hidden_states else None
911
+ all_self_attentions = () if output_attentions else None
912
+ all_cross_attentions = () if output_attentions else None
913
+
914
+ next_decoder_cache = () if use_cache else None
915
+
916
+ for i in range(self.config.num_hidden_layers):
917
+ layer_module = self.layer[i]
918
+ if output_hidden_states:
919
+ all_hidden_states = all_hidden_states + (hidden_states,)
920
+
921
+ layer_head_mask = head_mask[i] if head_mask is not None else None
922
+ past_key_value = past_key_values[i] if past_key_values is not None else None
923
+
924
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
925
+ if use_cache:
926
+ logger.warning(
927
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
928
+ )
929
+ use_cache = False
930
+ layer_outputs = self._gradient_checkpointing_func(
931
+ layer_module.__call__,
932
+ hidden_states,
933
+ attention_mask,
934
+ layer_head_mask,
935
+ encoder_hidden_states,
936
+ encoder_attention_mask,
937
+ )
938
+ else:
939
+ layer_outputs = layer_module(
940
+ hidden_states,
941
+ attention_mask,
942
+ layer_head_mask,
943
+ encoder_hidden_states,
944
+ encoder_attention_mask,
945
+ past_key_value,
946
+ output_attentions,
947
+ query_length,
948
+ )
949
+
950
+ hidden_states = layer_outputs[0]
951
+ if use_cache:
952
+ next_decoder_cache += (layer_outputs[-1],)
953
+ if output_attentions:
954
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
955
+ if layer_module.has_cross_attention:
956
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
957
+
958
+ if output_hidden_states:
959
+ all_hidden_states = all_hidden_states + (hidden_states,)
960
+
961
+ if not return_dict:
962
+ return tuple(
963
+ v
964
+ for v in [
965
+ hidden_states,
966
+ next_decoder_cache,
967
+ all_hidden_states,
968
+ all_self_attentions,
969
+ all_cross_attentions,
970
+ ]
971
+ if v is not None
972
+ )
973
+ return BaseModelOutputWithPastAndCrossAttentions(
974
+ last_hidden_state=hidden_states,
975
+ past_key_values=next_decoder_cache,
976
+ hidden_states=all_hidden_states,
977
+ attentions=all_self_attentions,
978
+ cross_attentions=all_cross_attentions,
979
+ )
980
+
981
+
982
+ class InstructBlipQFormerEmbeddings(nn.Module):
983
+ """Construct the embeddings from word and position embeddings."""
984
+
985
+ def __init__(self, config):
986
+ super().__init__()
987
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
988
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
989
+
990
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
991
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
992
+
993
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
994
+ self.register_buffer(
995
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
996
+ )
997
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
998
+
999
+ self.config = config
1000
+
1001
+ def forward(
1002
+ self,
1003
+ input_ids=None,
1004
+ position_ids=None,
1005
+ query_embeds=None,
1006
+ past_key_values_length=0,
1007
+ ):
1008
+ if input_ids is not None:
1009
+ seq_length = input_ids.size()[1]
1010
+ else:
1011
+ seq_length = 0
1012
+
1013
+ if position_ids is None:
1014
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()
1015
+
1016
+ if input_ids is not None:
1017
+ embeddings = self.word_embeddings(input_ids)
1018
+ if self.position_embedding_type == "absolute":
1019
+ position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
1020
+ embeddings = embeddings + position_embeddings
1021
+
1022
+ if query_embeds is not None:
1023
+ embeddings = torch.cat((query_embeds, embeddings), dim=1)
1024
+ else:
1025
+ embeddings = query_embeds
1026
+
1027
+ embeddings = embeddings.to(self.layernorm.weight.dtype)
1028
+ embeddings = self.layernorm(embeddings)
1029
+ embeddings = self.dropout(embeddings)
1030
+ return embeddings
1031
+
1032
+
1033
+ class InstructBlipQFormerModel(InstructBlipPreTrainedModel):
1034
+ """
1035
+ Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
1036
+ instruction as input.
1037
+ """
1038
+
1039
+ def __init__(self, config: InstructBlipQFormerConfig):
1040
+ super().__init__(config)
1041
+ self.config = config
1042
+
1043
+ self.embeddings = InstructBlipQFormerEmbeddings(config)
1044
+
1045
+ self.encoder = InstructBlipQFormerEncoder(config)
1046
+
1047
+ self.post_init()
1048
+
1049
+ def get_input_embeddings(self):
1050
+ return self.embeddings.word_embeddings
1051
+
1052
+ def set_input_embeddings(self, value):
1053
+ self.embeddings.word_embeddings = value
1054
+
1055
+ def _prune_heads(self, heads_to_prune):
1056
+ """
1057
+ Prunes heads of the model. `heads_to_prune`: dict of {layer_num: list of heads to prune in this layer}. See the
1058
+ base class `PreTrainedModel`.
1059
+ """
1060
+ for layer, heads in heads_to_prune.items():
1061
+ self.encoder.layer[layer].attention.prune_heads(heads)
1062
+
1063
+ def get_extended_attention_mask(
1064
+ self,
1065
+ attention_mask: torch.Tensor,
1066
+ input_shape: Tuple[int],
1067
+ device: torch.device,
1068
+ has_query: bool = False,
1069
+ ) -> torch.Tensor:
1070
+ """
1071
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
1072
+
1073
+ Arguments:
1074
+ attention_mask (`torch.Tensor`):
1075
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
1076
+ input_shape (`Tuple[int]`):
1077
+ The shape of the input to the model.
1078
+ device: (`torch.device`):
1079
+ The device of the input to the model.
1080
+
1081
+ Returns:
1082
+ `torch.Tensor`: The extended attention mask, with the same dtype as `attention_mask.dtype`.
1083
+ """
1084
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1085
+ # ourselves in which case we just need to make it broadcastable to all heads.
1086
+ if attention_mask.dim() == 3:
1087
+ extended_attention_mask = attention_mask[:, None, :, :]
1088
+ elif attention_mask.dim() == 2:
1089
+ # Provided a padding mask of dimensions [batch_size, seq_length]
1090
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
1091
+ extended_attention_mask = attention_mask[:, None, None, :]
1092
+ else:
1093
+ raise ValueError(
1094
+ f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})",
1095
+ )
1096
+
1097
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1098
+ # masked positions, this operation will create a tensor which is 0.0 for
1099
+ # positions we want to attend and -10000.0 for masked positions.
1100
+ # Since we are adding it to the raw scores before the softmax, this is
1101
+ # effectively the same as removing these entirely.
1102
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
1103
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
1104
+ return extended_attention_mask
1105
+
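+ # --- Illustrative sketch (not part of the original file) ---
+ # What the helper above does to a 2-D padding mask, with a tiny hypothetical example:
+ #
+ #     attention_mask = torch.tensor([[1, 1, 0]])                  # (batch_size=1, seq_len=3)
+ #     extended = attention_mask[:, None, None, :].to(torch.float32)
+ #     extended = (1.0 - extended) * -10000.0                      # -> [[[[0., 0., -10000.]]]]
+ #     # the result broadcasts over heads/query positions and is *added* to the attention scores,
+ #     # so masked positions get ~zero probability after the softmax.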
1106
+ def forward(
1107
+ self,
1108
+ input_ids: torch.LongTensor,
1109
+ attention_mask: Optional[torch.FloatTensor] = None,
1110
+ position_ids: Optional[torch.LongTensor] = None,
1111
+ query_embeds: Optional[torch.Tensor] = None,
1112
+ head_mask: Optional[torch.FloatTensor] = None,
1113
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1114
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1115
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1116
+ use_cache: Optional[bool] = None,
1117
+ output_attentions: Optional[bool] = None,
1118
+ output_hidden_states: Optional[bool] = None,
1119
+ return_dict: Optional[bool] = None,
1120
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
1121
+ r"""
1122
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1123
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1124
+ the model is configured as a decoder.
1125
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1126
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1127
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1128
+ - 1 for tokens that are **not masked**,
1129
+ - 0 for tokens that are **masked**.
1130
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
1131
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
1132
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
1133
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
1134
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
1135
+ `(batch_size, sequence_length)`.
1136
+ use_cache (`bool`, *optional*):
1137
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1138
+ `past_key_values`).
1139
+ """
1140
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1141
+ output_hidden_states = (
1142
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1143
+ )
1144
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1145
+
1146
+ if input_ids is None and query_embeds is None:
1147
+ raise ValueError("You have to specify query_embeds when input_ids is None")
1148
+
1149
+ # past_key_values_length
1150
+ past_key_values_length = (
1151
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
1152
+ )
1153
+
1154
+ query_length = query_embeds.shape[1] if query_embeds is not None else 0
1155
+
1156
+ embedding_output = self.embeddings(
1157
+ input_ids=input_ids,
1158
+ position_ids=position_ids,
1159
+ query_embeds=query_embeds,
1160
+ past_key_values_length=past_key_values_length,
1161
+ )
1162
+
1163
+ input_shape = embedding_output.size()[:-1]
1164
+ batch_size, seq_length = input_shape
1165
+ device = embedding_output.device
1166
+
1167
+ if attention_mask is None:
1168
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
1169
+
1170
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
1171
+ # ourselves in which case we just need to make it broadcastable to all heads.
1172
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
1173
+
1174
+ # If a 2D or 3D attention mask is provided for the cross-attention
1175
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1176
+ if encoder_hidden_states is not None:
1177
+ if isinstance(encoder_hidden_states, list):
1178
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
1179
+ else:
1180
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1181
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1182
+
1183
+ if isinstance(encoder_attention_mask, list):
1184
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
1185
+ elif encoder_attention_mask is None:
1186
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1187
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1188
+ else:
1189
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1190
+ else:
1191
+ encoder_extended_attention_mask = None
1192
+
1193
+ # Prepare head mask if needed
1194
+ # 1.0 in head_mask indicate we keep the head
1195
+ # attention_probs has shape bsz x n_heads x N x N
1196
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1197
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1198
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1199
+
1200
+ encoder_outputs = self.encoder(
1201
+ embedding_output,
1202
+ attention_mask=extended_attention_mask,
1203
+ head_mask=head_mask,
1204
+ encoder_hidden_states=encoder_hidden_states,
1205
+ encoder_attention_mask=encoder_extended_attention_mask,
1206
+ past_key_values=past_key_values,
1207
+ use_cache=use_cache,
1208
+ output_attentions=output_attentions,
1209
+ output_hidden_states=output_hidden_states,
1210
+ return_dict=return_dict,
1211
+ query_length=query_length,
1212
+ )
1213
+ sequence_output = encoder_outputs[0]
1214
+ pooled_output = sequence_output[:, 0, :]
1215
+
1216
+ if not return_dict:
1217
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1218
+
1219
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1220
+ last_hidden_state=sequence_output,
1221
+ pooler_output=pooled_output,
1222
+ past_key_values=encoder_outputs.past_key_values,
1223
+ hidden_states=encoder_outputs.hidden_states,
1224
+ attentions=encoder_outputs.attentions,
1225
+ cross_attentions=encoder_outputs.cross_attentions,
1226
+ )
1227
+
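+ # --- Illustrative sketch (not part of the original file) ---
+ # A hedged, randomly initialized example of how the Q-Former above is driven: learned query
+ # embeddings plus tokenized instruction text, with image features passed as
+ # `encoder_hidden_states` for cross-attention (all sizes and token ids below are hypothetical):
+ #
+ #     qformer_config = InstructBlipQFormerConfig(encoder_hidden_size=1408)
+ #     qformer = InstructBlipQFormerModel(qformer_config)
+ #     query_tokens = torch.randn(1, 32, qformer_config.hidden_size)   # normally a learned nn.Parameter
+ #     instruction_ids = torch.tensor([[101, 2054, 2003, 1999, 1996, 3746, 102]])  # arbitrary ids
+ #     image_embeds = torch.randn(1, 257, 1408)
+ #     out = qformer(
+ #         input_ids=instruction_ids,
+ #         query_embeds=query_tokens,
+ #         encoder_hidden_states=image_embeds,
+ #     )
+ #     out.last_hidden_state[:, :32, :]                # the query slots used to condition the language model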
1228
+
1229
+ @add_start_docstrings(
1230
+ """
1231
+ InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision
1232
+ encoder, Querying Transformer (Q-Former) and a language model.
1233
+
1234
+ One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
1235
+ the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
1236
+ """,
1237
+ INSTRUCTBLIP_START_DOCSTRING,
1238
+ )
1239
+ class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel):
1240
+ config_class = InstructBlipConfig
1241
+ main_input_name = "pixel_values"
1242
+
1243
+ def __init__(self, config: InstructBlipConfig):
1244
+ super().__init__(config)
1245
+
1246
+ self.vision_model = InstructBlipVisionModel(config.vision_config)
1247
+
1248
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
1249
+ self.qformer = InstructBlipQFormerModel(config.qformer_config)
1250
+
1251
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
1252
+
1253
+ if config.use_decoder_only_language_model:
1254
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
1255
+ else:
1256
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
1257
+
1258
+ if language_model._no_split_modules is not None:
1259
+ self._no_split_modules.extend(language_model._no_split_modules)
1260
+
1261
+ if language_model._keep_in_fp32_modules is not None:
1262
+ self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
1263
+
1264
+ self.language_model = language_model
1265
+
1266
+ # Initialize weights and apply final processing
1267
+ self.post_init()
1268
+
1269
+ def get_input_embeddings(self):
1270
+ return self.language_model.get_input_embeddings()
1271
+
1272
+ def set_input_embeddings(self, value):
1273
+ self.language_model.set_input_embeddings(value)
1274
+
1275
+ def set_output_embeddings(self, new_embeddings):
1276
+ self.language_model.set_output_embeddings(new_embeddings)
1277
+
1278
+ def get_output_embeddings(self) -> nn.Module:
1279
+ return self.language_model.get_output_embeddings()
1280
+
1281
+ def get_encoder(self):
1282
+ return self.language_model.get_encoder()
1283
+
1284
+ def get_decoder(self):
1285
+ return self.language_model.get_decoder()
1286
+
1287
+ def _tie_weights(self):
1288
+ if not self.config.use_decoder_only_language_model:
1289
+ self.language_model.encoder.embed_tokens = self.language_model.shared
1290
+ self.language_model.decoder.embed_tokens = self.language_model.shared
1291
+
1292
+ def _preprocess_accelerate(self):
1293
+ r"""
1294
+ Some pre-processing hacks to make the model `accelerate` compatible. Check
1295
+ https://github.com/huggingface/transformers/pull/21707 for more details.
1296
+ """
1297
+ hf_device_map = self.hf_device_map
1298
+
1299
+ if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
1300
+ # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`.
1301
+ logger.warning(
1302
+ "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
1303
+ " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
1304
+ " Please pass a `device_map` that contains `language_model` to remove this warning."
1305
+ " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
1306
+ " more details on creating a `device_map` for large models.",
1307
+ )
1308
+
1309
+ if hasattr(self.language_model, "_hf_hook"):
1310
+ self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
1311
+
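+ # --- Illustrative sketch (not part of the original file) ---
+ # The warning above suggests passing a `device_map` that places the whole language model
+ # explicitly; a hedged example of such a map for a 2-GPU setup (module names follow the
+ # attributes of this class, device assignments are arbitrary):
+ #
+ #     device_map = {
+ #         "vision_model": 0,
+ #         "query_tokens": 0,
+ #         "qformer": 0,
+ #         "language_projection": 0,
+ #         "language_model": 1,
+ #     }
+ #     model = InstructBlipForConditionalGeneration.from_pretrained(
+ #         "Salesforce/instructblip-flan-t5-xl", device_map=device_map
+ #     )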
1312
+ @add_start_docstrings_to_model_forward(INSTRUCTBLIP_INPUTS_DOCSTRING)
1313
+ @replace_return_docstrings(
1314
+ output_type=InstructBlipForConditionalGenerationModelOutput, config_class=InstructBlipVisionConfig
1315
+ )
1316
+ def forward(
1317
+ self,
1318
+ pixel_values: torch.FloatTensor,
1319
+ qformer_input_ids: torch.FloatTensor,
1320
+ qformer_attention_mask: Optional[torch.LongTensor] = None,
1321
+ input_ids: Optional[torch.FloatTensor] = None,
1322
+ attention_mask: Optional[torch.LongTensor] = None,
1323
+ decoder_input_ids: Optional[torch.LongTensor] = None,
1324
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
1325
+ output_attentions: Optional[bool] = None,
1326
+ output_hidden_states: Optional[bool] = None,
1327
+ labels: Optional[torch.LongTensor] = None,
1328
+ return_dict: Optional[bool] = None,
1329
+ ) -> Union[Tuple, InstructBlipForConditionalGenerationModelOutput]:
1330
+ r"""
1331
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1332
+ Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
1333
+ 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for labels in `[0, ...,
1334
+ config.vocab_size - 1]`
1335
+
1336
+ Returns:
1337
+
1338
+ Examples:
1339
+
1340
+ ```python
1341
+ >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
1342
+ >>> import torch
1343
+ >>> from PIL import Image
1344
+ >>> import requests
1345
+
1346
+ >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
1347
+ >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
1348
+
1349
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
1350
+ >>> model.to(device) # doctest: +IGNORE_RESULT
1351
+
1352
+ >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
1353
+ >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
1354
+ >>> prompt = "What is unusual about this image?"
1355
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
1356
+
1357
+ >>> outputs = model.generate(
1358
+ ... **inputs,
1359
+ ... do_sample=False,
1360
+ ... num_beams=5,
1361
+ ... max_length=256,
1362
+ ... min_length=1,
1363
+ ... top_p=0.9,
1364
+ ... repetition_penalty=1.5,
1365
+ ... length_penalty=1.0,
1366
+ ... temperature=1,
1367
+ ... )
1368
+ >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
1369
+ >>> print(generated_text)
1370
+ The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation.
1371
+ ```"""
1372
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1373
+
1374
+ # step 1: forward the images through the vision encoder,
1375
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
1376
+ vision_outputs = self.vision_model(
1377
+ pixel_values=pixel_values,
1378
+ output_attentions=output_attentions,
1379
+ output_hidden_states=output_hidden_states,
1380
+ return_dict=return_dict,
1381
+ )
1382
+ image_embeds = vision_outputs[0]
1383
+
1384
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
1385
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1386
+
1387
+ # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
1388
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1389
+ query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
1390
+ if qformer_attention_mask is None:
1391
+ qformer_attention_mask = torch.ones_like(qformer_input_ids)
1392
+ qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
1393
+ query_outputs = self.qformer(
1394
+ input_ids=qformer_input_ids,
1395
+ attention_mask=qformer_attention_mask,
1396
+ query_embeds=query_tokens,
1397
+ encoder_hidden_states=image_embeds,
1398
+ encoder_attention_mask=image_attention_mask,
1399
+ output_attentions=output_attentions,
1400
+ output_hidden_states=output_hidden_states,
1401
+ return_dict=return_dict,
1402
+ )
1403
+ query_output = query_outputs[0][:, : query_tokens.size(1), :]
1404
+
1405
+ # step 3: use the language model, conditioned on the query outputs and the prompt
1406
+ language_model_inputs = self.language_projection(query_output)
1407
+ language_model_attention_mask = torch.ones(
1408
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1409
+ )
1410
+
1411
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
1412
+
1413
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1414
+
1415
+ if attention_mask is None:
1416
+ attention_mask = torch.ones_like(input_ids)
1417
+ attention_mask = torch.cat([language_model_attention_mask.to(attention_mask.device), attention_mask], dim=1)
1418
+
1419
+ if self.config.use_decoder_only_language_model:
1420
+ outputs = self.language_model(
1421
+ inputs_embeds=inputs_embeds,
1422
+ attention_mask=attention_mask,
1423
+ output_attentions=output_attentions,
1424
+ output_hidden_states=output_hidden_states,
1425
+ return_dict=return_dict,
1426
+ )
1427
+ logits = outputs.logits if return_dict else outputs[0]
1428
+ loss = None
1429
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
1430
+ if labels is not None:
1431
+ labels = labels.to(logits.device)
1432
+ logits = logits[:, -labels.size(1) :, :]
1433
+ # Shift so that tokens < n predict n
1434
+ shift_logits = logits[..., :-1, :].contiguous()
1435
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
1436
+
1437
+ # Flatten the tokens
1438
+ loss_fct = CrossEntropyLoss(reduction="mean")
1439
+
1440
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
1441
+ else:
1442
+ outputs = self.language_model(
1443
+ inputs_embeds=inputs_embeds,
1444
+ attention_mask=attention_mask,
1445
+ decoder_input_ids=decoder_input_ids,
1446
+ decoder_attention_mask=decoder_attention_mask,
1447
+ output_attentions=output_attentions,
1448
+ output_hidden_states=output_hidden_states,
1449
+ return_dict=return_dict,
1450
+ labels=labels,
1451
+ )
1452
+ loss = outputs.loss if return_dict else outputs[0]
1453
+ logits = outputs.logits if return_dict else outputs[1]
1454
+
1455
+ if not return_dict:
1456
+ output = (logits, vision_outputs, query_outputs, outputs)
1457
+ return ((loss,) + output) if loss is not None else output
1458
+
1459
+ return InstructBlipForConditionalGenerationModelOutput(
1460
+ loss=loss,
1461
+ logits=logits,
1462
+ vision_outputs=vision_outputs,
1463
+ qformer_outputs=query_outputs,
1464
+ language_model_outputs=outputs,
1465
+ )
1466
+
1467
+ @torch.no_grad()
1468
+ def generate(
1469
+ self,
1470
+ pixel_values: torch.FloatTensor,
1471
+ qformer_input_ids: Optional[torch.LongTensor] = None,
1472
+ qformer_attention_mask: Optional[torch.LongTensor] = None,
1473
+ input_ids: Optional[torch.LongTensor] = None,
1474
+ attention_mask: Optional[torch.LongTensor] = None,
1475
+ **generate_kwargs,
1476
+ ) -> torch.LongTensor:
1477
+ """
1478
+ Overrides the `generate` function to be able to use the model as a conditional generator.
1479
+
1480
+ Args:
1481
+ pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
1482
+ Input images to be processed.
1483
+ qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1484
+ The sequence used as a prompt to be fed to the Q-Former module.
1485
+ qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1486
+ Mask to avoid performing attention on padding token indices.
1487
+ input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1488
+ The sequence used as a prompt for the generation.
1489
+ attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
1490
+ Mask to avoid performing attention on padding token indices.
1491
+
1492
+ Returns:
1493
+ captions (list): A list of strings of length batch_size * num_captions.
1494
+ """
1495
+ if hasattr(self, "hf_device_map"):
1496
+ # preprocess for `accelerate`
1497
+ self._preprocess_accelerate()
1498
+
1499
+ batch_size = pixel_values.shape[0]
1500
+ image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
1501
+
1502
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
1503
+
1504
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
1505
+ query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
1506
+ if qformer_attention_mask is None:
1507
+ qformer_attention_mask = torch.ones_like(qformer_input_ids)
1508
+ qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
1509
+ query_outputs = self.qformer(
1510
+ input_ids=qformer_input_ids,
1511
+ attention_mask=qformer_attention_mask,
1512
+ query_embeds=query_tokens,
1513
+ encoder_hidden_states=image_embeds,
1514
+ encoder_attention_mask=image_attention_mask,
1515
+ return_dict=True,
1516
+ )
1517
+ query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :]
1518
+
1519
+ language_model_inputs = self.language_projection(query_output)
1520
+ language_attention_mask = torch.ones(
1521
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
1522
+ )
1523
+
1524
+ if input_ids is None:
1525
+ input_ids = (
1526
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
1527
+ .repeat(batch_size, 1)
1528
+ .to(image_embeds.device)
1529
+ )
1530
+ if attention_mask is None:
1531
+ attention_mask = torch.ones_like(input_ids)
1532
+ attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)
1533
+
1534
+ # concatenate query embeddings with prompt embeddings
1535
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1536
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
1537
+
1538
+ # add image_embeds length to max_length, so that the final max_length is counted only on token embeds
1539
+ # -1 is to account for the BOS token prepended after `generate`.
1540
+ if not self.language_model.config.is_encoder_decoder:
1541
+ generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
1542
+ generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
1543
+
1544
+ outputs = self.language_model.generate(
1545
+ inputs_embeds=inputs_embeds,
1546
+ attention_mask=attention_mask,
1547
+ **generate_kwargs,
1548
+ )
1549
+
1550
+ # this is a temporary workaround to be consistent with other generation models and
1551
+ # have BOS as the first token, even though under the hood we are calling LM with embeds
1552
+ if not self.language_model.config.is_encoder_decoder:
1553
+ # the InstructBLIP authors used inconsistent tokenizer/model files during training,
1554
+ # with the tokenizer's bos token being set to </s> which has ID=2,
1555
+ # whereas the model's text config has bos token id = 0
1556
+ bos_token_id = (
1557
+ 2
1558
+ if self.config.text_config.architectures[0] == "LLaMAForCausalLM"
1559
+ else self.config.text_config.bos_token_id
1560
+ )
1561
+ bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device)
1562
+ if not isinstance(outputs, torch.Tensor):
1563
+ outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1)
1564
+ else:
1565
+ outputs = torch.cat([bos_tokens, outputs], dim=-1)
1566
+
1567
+ return outputs
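
A minimal end-to-end sketch of the `generate` override above, reusing the checkpoint and image URL from the docstring earlier in this file (the generation arguments are illustrative only, not recommended settings):

import torch
import requests
from PIL import Image
from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# The processor returns pixel_values, input_ids/attention_mask (language-model tokenizer) and
# qformer_input_ids/qformer_attention_mask (Q-Former tokenizer), which map directly onto the
# keyword arguments of `generate` above.
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt").to(device)
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip())
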
llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for InstructBLIP. Largely a copy of Blip2Processor, with the addition of a tokenizer for the Q-Former.
17
+ """
18
+
19
+ import os
20
+ from typing import List, Optional, Union
21
+
22
+ from ...image_processing_utils import BatchFeature
23
+ from ...image_utils import ImageInput
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
26
+ from ...utils import TensorType
27
+ from ..auto import AutoTokenizer
28
+
29
+
30
+ class InstructBlipProcessor(ProcessorMixin):
31
+ r"""
32
+ Constructs an InstructBLIP processor which wraps a BLIP image processor and a LLaMa/T5 tokenizer into a single
33
+ processor.
34
+
35
+ [`InstructBlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the
36
+ docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
37
+
38
+ Args:
39
+ image_processor (`BlipImageProcessor`):
40
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
41
+ tokenizer (`AutoTokenizer`):
42
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
43
+ qformer_tokenizer (`AutoTokenizer`):
44
+ An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
45
+ """
46
+
47
+ attributes = ["image_processor", "tokenizer"]
48
+ image_processor_class = "BlipImageProcessor"
49
+ tokenizer_class = "AutoTokenizer"
50
+
51
+ def __init__(self, image_processor, tokenizer, qformer_tokenizer):
52
+ super().__init__(image_processor, tokenizer)
53
+
54
+ # add QFormer tokenizer
55
+ self.qformer_tokenizer = qformer_tokenizer
56
+
57
+ def __call__(
58
+ self,
59
+ images: ImageInput = None,
60
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
61
+ add_special_tokens: bool = True,
62
+ padding: Union[bool, str, PaddingStrategy] = False,
63
+ truncation: Union[bool, str, TruncationStrategy] = None,
64
+ max_length: Optional[int] = None,
65
+ stride: int = 0,
66
+ pad_to_multiple_of: Optional[int] = None,
67
+ return_attention_mask: Optional[bool] = None,
68
+ return_overflowing_tokens: bool = False,
69
+ return_special_tokens_mask: bool = False,
70
+ return_offsets_mapping: bool = False,
71
+ return_token_type_ids: bool = False,
72
+ return_length: bool = False,
73
+ verbose: bool = True,
74
+ return_tensors: Optional[Union[str, TensorType]] = None,
75
+ **kwargs,
76
+ ) -> BatchFeature:
77
+ """
78
+ This method uses the [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
79
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
80
+
81
+ Please refer to the docstring of the above two methods for more information.
82
+ """
83
+ if images is None and text is None:
84
+ raise ValueError("You have to specify at least images or text.")
85
+
86
+ encoding = BatchFeature()
87
+
88
+ if text is not None:
89
+ text_encoding = self.tokenizer(
90
+ text=text,
91
+ add_special_tokens=add_special_tokens,
92
+ padding=padding,
93
+ truncation=truncation,
94
+ max_length=max_length,
95
+ stride=stride,
96
+ pad_to_multiple_of=pad_to_multiple_of,
97
+ return_attention_mask=return_attention_mask,
98
+ return_overflowing_tokens=return_overflowing_tokens,
99
+ return_special_tokens_mask=return_special_tokens_mask,
100
+ return_offsets_mapping=return_offsets_mapping,
101
+ return_token_type_ids=return_token_type_ids,
102
+ return_length=return_length,
103
+ verbose=verbose,
104
+ return_tensors=return_tensors,
105
+ **kwargs,
106
+ )
107
+ encoding.update(text_encoding)
108
+ qformer_text_encoding = self.qformer_tokenizer(
109
+ text=text,
110
+ add_special_tokens=add_special_tokens,
111
+ padding=padding,
112
+ truncation=truncation,
113
+ max_length=max_length,
114
+ stride=stride,
115
+ pad_to_multiple_of=pad_to_multiple_of,
116
+ return_attention_mask=return_attention_mask,
117
+ return_overflowing_tokens=return_overflowing_tokens,
118
+ return_special_tokens_mask=return_special_tokens_mask,
119
+ return_offsets_mapping=return_offsets_mapping,
120
+ return_token_type_ids=return_token_type_ids,
121
+ return_length=return_length,
122
+ verbose=verbose,
123
+ return_tensors=return_tensors,
124
+ **kwargs,
125
+ )
126
+ encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
127
+ encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
128
+
129
+ if images is not None:
130
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
131
+ encoding.update(image_encoding)
132
+
133
+ return encoding
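
A minimal sketch of the `__call__` defined above: the same text is run through both the language-model tokenizer and the Q-Former tokenizer, with the latter exposed under `qformer_*` keys (the blank PIL image is a placeholder for illustration only):

from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
dummy_image = Image.new("RGB", (224, 224))  # placeholder image

inputs = processor(images=dummy_image, text="Describe the image.", return_tensors="pt")
print(sorted(inputs.keys()))
# expected: ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']
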
134
+
135
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
136
+ def batch_decode(self, *args, **kwargs):
137
+ """
138
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
139
+ refer to the docstring of this method for more information.
140
+ """
141
+ return self.tokenizer.batch_decode(*args, **kwargs)
142
+
143
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
144
+ def decode(self, *args, **kwargs):
145
+ """
146
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
147
+ the docstring of this method for more information.
148
+ """
149
+ return self.tokenizer.decode(*args, **kwargs)
150
+
151
+ @property
152
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
153
+ def model_input_names(self):
154
+ tokenizer_input_names = self.tokenizer.model_input_names
155
+ image_processor_input_names = self.image_processor.model_input_names
156
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
157
+
158
+ # overwrite to save the Q-Former tokenizer in a separate folder
159
+ def save_pretrained(self, save_directory, **kwargs):
160
+ if os.path.isfile(save_directory):
161
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
162
+ os.makedirs(save_directory, exist_ok=True)
163
+ qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
164
+ self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
165
+ return super().save_pretrained(save_directory, **kwargs)
166
+
167
+ # overwrite to load the Q-Former tokenizer from a separate folder
168
+ @classmethod
169
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
170
+ qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
171
+ args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
172
+ args.append(qformer_tokenizer)
173
+ return cls(*args)
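
A minimal sketch of the save/load round trip implemented by the two overrides above (the local directory name is a placeholder):

from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")

# save_pretrained writes the Q-Former tokenizer to a "qformer_tokenizer" subfolder;
# from_pretrained reloads it from that subfolder and passes it back to the constructor.
processor.save_pretrained("./instructblip-processor")
reloaded = InstructBlipProcessor.from_pretrained("./instructblip-processor")
assert reloaded.qformer_tokenizer is not None
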
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py ADDED
@@ -0,0 +1,117 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
28
+ "tokenization_lxmert": ["LxmertTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_tokenizers_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_lxmert"] = [
46
+ "LxmertEncoder",
47
+ "LxmertForPreTraining",
48
+ "LxmertForQuestionAnswering",
49
+ "LxmertModel",
50
+ "LxmertPreTrainedModel",
51
+ "LxmertVisualFeatureEncoder",
52
+ "LxmertXLayer",
53
+ ]
54
+
55
+ try:
56
+ if not is_tf_available():
57
+ raise OptionalDependencyNotAvailable()
58
+ except OptionalDependencyNotAvailable:
59
+ pass
60
+ else:
61
+ _import_structure["modeling_tf_lxmert"] = [
62
+ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
63
+ "TFLxmertForPreTraining",
64
+ "TFLxmertMainLayer",
65
+ "TFLxmertModel",
66
+ "TFLxmertPreTrainedModel",
67
+ "TFLxmertVisualFeatureEncoder",
68
+ ]
69
+
70
+
71
+ if TYPE_CHECKING:
72
+ from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
73
+ from .tokenization_lxmert import LxmertTokenizer
74
+
75
+ try:
76
+ if not is_tokenizers_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .tokenization_lxmert_fast import LxmertTokenizerFast
82
+
83
+ try:
84
+ if not is_torch_available():
85
+ raise OptionalDependencyNotAvailable()
86
+ except OptionalDependencyNotAvailable:
87
+ pass
88
+ else:
89
+ from .modeling_lxmert import (
90
+ LxmertEncoder,
91
+ LxmertForPreTraining,
92
+ LxmertForQuestionAnswering,
93
+ LxmertModel,
94
+ LxmertPreTrainedModel,
95
+ LxmertVisualFeatureEncoder,
96
+ LxmertXLayer,
97
+ )
98
+
99
+ try:
100
+ if not is_tf_available():
101
+ raise OptionalDependencyNotAvailable()
102
+ except OptionalDependencyNotAvailable:
103
+ pass
104
+ else:
105
+ from .modeling_tf_lxmert import (
106
+ TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
107
+ TFLxmertForPreTraining,
108
+ TFLxmertMainLayer,
109
+ TFLxmertModel,
110
+ TFLxmertPreTrainedModel,
111
+ TFLxmertVisualFeatureEncoder,
112
+ )
113
+
114
+ else:
115
+ import sys
116
+
117
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
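
A short sketch of what the lazy-module wiring above means in practice: public names resolve on first attribute access, and the torch/TF model classes are only importable when the corresponding backend is installed (the checkpoint name is the one referenced in the LXMERT configuration docstring below):

# Resolved lazily via _LazyModule; no modeling code is imported until the names are accessed.
from transformers import LxmertConfig, LxmertTokenizer

config = LxmertConfig()  # defaults mirror unc-nlp/lxmert-base-uncased
tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
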
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc ADDED
Binary file (7.91 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc ADDED
Binary file (45.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc ADDED
Binary file (51.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc ADDED
Binary file (6.67 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2018, Hao Tan, Mohit Bansal
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LXMERT model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class LxmertConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
31
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
32
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
33
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 30522):
41
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_attention_heads (`int`, *optional*, defaults to 12):
46
+ Number of attention heads for each attention layer in the Transformer encoder.
47
+ num_qa_labels (`int`, *optional*, defaults to 9500):
48
+ This represents the total number of different question answering (QA) labels there are. If using more than
49
+ one dataset with QA, the user will need to account for the total number of labels that all of the datasets
50
+ have in total.
51
+ num_object_labels (`int`, *optional*, defaults to 1600):
52
+ This represents the total number of semantically unique objects that lxmert will be able to classify a
53
+ pooled-object feature as belonging to.
54
+ num_attr_labels (`int`, *optional*, defaults to 400):
55
+ This represents the total number of semantically unique attributes that lxmert will be able to classify a
56
+ pooled-object feature as possessing.
57
+ intermediate_size (`int`, *optional*, defaults to 3072):
58
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
59
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
62
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
64
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
65
+ The dropout ratio for the attention probabilities.
66
+ max_position_embeddings (`int`, *optional*, defaults to 512):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ type_vocab_size (`int`, *optional*, defaults to 2):
70
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
74
+ The epsilon used by the layer normalization layers.
75
+ l_layers (`int`, *optional*, defaults to 9):
76
+ Number of hidden layers in the Transformer language encoder.
77
+ x_layers (`int`, *optional*, defaults to 5):
78
+ Number of hidden layers in the Transformer cross modality encoder.
79
+ r_layers (`int`, *optional*, defaults to 5):
80
+ Number of hidden layers in the Transformer visual encoder.
81
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
82
+ This represents the last dimension of the pooled-object features used as input for the model, representing
83
+ the size of each object feature itself.
84
+ visual_pos_dim (`int`, *optional*, defaults to 4):
85
+ This represents the number of spatial features that are mixed into the visual features. The default is set
86
+ to 4 because most commonly this will represent the location of a bounding box. i.e., (x, y, width, height)
87
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
88
+ This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
89
+ decides to train with multiple vision-based loss objectives.
90
+ task_matched (`bool`, *optional*, defaults to `True`):
91
+ This task is used for sentence-image matching. If the sentence correctly describes the image, the label will
92
+ be 1. If the sentence does not correctly describe the image, the label will be 0.
93
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
94
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
95
+ objective.
96
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
97
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
98
+ task_qa (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to add the question-answering loss to the objective
100
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
101
+ Whether or not to calculate the object-prediction loss objective
102
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to calculate the attribute-prediction loss objective
104
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
105
+ Whether or not to calculate the feature-regression loss objective
106
+ """
107
+
108
+ model_type = "lxmert"
109
+ attribute_map = {}
110
+
111
+ def __init__(
112
+ self,
113
+ vocab_size=30522,
114
+ hidden_size=768,
115
+ num_attention_heads=12,
116
+ num_qa_labels=9500,
117
+ num_object_labels=1600,
118
+ num_attr_labels=400,
119
+ intermediate_size=3072,
120
+ hidden_act="gelu",
121
+ hidden_dropout_prob=0.1,
122
+ attention_probs_dropout_prob=0.1,
123
+ max_position_embeddings=512,
124
+ type_vocab_size=2,
125
+ initializer_range=0.02,
126
+ layer_norm_eps=1e-12,
127
+ l_layers=9,
128
+ x_layers=5,
129
+ r_layers=5,
130
+ visual_feat_dim=2048,
131
+ visual_pos_dim=4,
132
+ visual_loss_normalizer=6.67,
133
+ task_matched=True,
134
+ task_mask_lm=True,
135
+ task_obj_predict=True,
136
+ task_qa=True,
137
+ visual_obj_loss=True,
138
+ visual_attr_loss=True,
139
+ visual_feat_loss=True,
140
+ **kwargs,
141
+ ):
142
+ self.vocab_size = vocab_size
143
+ self.hidden_size = hidden_size
144
+ self.num_attention_heads = num_attention_heads
145
+ self.hidden_act = hidden_act
146
+ self.intermediate_size = intermediate_size
147
+ self.hidden_dropout_prob = hidden_dropout_prob
148
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
149
+ self.max_position_embeddings = max_position_embeddings
150
+ self.type_vocab_size = type_vocab_size
151
+ self.initializer_range = initializer_range
152
+ self.layer_norm_eps = layer_norm_eps
153
+ self.num_qa_labels = num_qa_labels
154
+ self.num_object_labels = num_object_labels
155
+ self.num_attr_labels = num_attr_labels
156
+ self.l_layers = l_layers
157
+ self.x_layers = x_layers
158
+ self.r_layers = r_layers
159
+ self.visual_feat_dim = visual_feat_dim
160
+ self.visual_pos_dim = visual_pos_dim
161
+ self.visual_loss_normalizer = visual_loss_normalizer
162
+ self.task_matched = task_matched
163
+ self.task_mask_lm = task_mask_lm
164
+ self.task_obj_predict = task_obj_predict
165
+ self.task_qa = task_qa
166
+ self.visual_obj_loss = visual_obj_loss
167
+ self.visual_attr_loss = visual_attr_loss
168
+ self.visual_feat_loss = visual_feat_loss
169
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
170
+ super().__init__(**kwargs)
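
A minimal sketch of instantiating this configuration with non-default encoder depths and reading back the derived `num_hidden_layers` mapping set at the end of `__init__`:

from transformers import LxmertConfig

config = LxmertConfig(l_layers=6, x_layers=3, r_layers=2)
print(config.num_hidden_layers)
# {'vision': 2, 'cross_encoder': 3, 'language': 6}
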
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,60 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LXMERT checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
30
+ # Initialise PyTorch model
31
+ config = LxmertConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+ model = LxmertForPreTraining(config)
34
+
35
+ # Load weights from tf checkpoint
36
+ load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
37
+
38
+ # Save pytorch-model
39
+ print(f"Save PyTorch model to {pytorch_dump_path}")
40
+ torch.save(model.state_dict(), pytorch_dump_path)
41
+
42
+
43
+ if __name__ == "__main__":
44
+ parser = argparse.ArgumentParser()
45
+ # Required parameters
46
+ parser.add_argument(
47
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
48
+ )
49
+ parser.add_argument(
50
+ "--config_file",
51
+ default=None,
52
+ type=str,
53
+ required=True,
54
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
55
+ )
56
+ parser.add_argument(
57
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
58
+ )
59
+ args = parser.parse_args()
60
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
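
An example of the same conversion driven from Python rather than the command line (all paths below are placeholders):

import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert

config = LxmertConfig.from_json_file("/path/to/lxmert_config.json")
model = LxmertForPreTraining(config)
load_tf_weights_in_lxmert(model, config, "/path/to/lxmert_tf_checkpoint")
torch.save(model.state_dict(), "/path/to/pytorch_model.bin")
# Equivalent CLI: python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ... --config_file ... --pytorch_dump_path ...
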
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py ADDED
@@ -0,0 +1,1434 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LXMERT model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import Dict, Optional, Tuple, Union
23
+
24
+ import torch
25
+ from torch import nn
26
+ from torch.nn import CrossEntropyLoss, SmoothL1Loss
27
+
28
+ from ...activations import ACT2FN, gelu
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_lxmert import LxmertConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
44
+ _CONFIG_FOR_DOC = "LxmertConfig"
45
+
46
+
47
+ class GeLU(nn.Module):
48
+ def __init__(self):
49
+ super().__init__()
50
+
51
+ def forward(self, x):
52
+ return gelu(x)
53
+
54
+
55
+ @dataclass
56
+ class LxmertModelOutput(ModelOutput):
57
+ """
58
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
59
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
60
+ encoder.)
61
+
62
+
63
+ Args:
64
+ language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
65
+ Sequence of hidden-states at the output of the last layer of the language encoder.
66
+ vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
67
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
68
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
69
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
70
+ by a Linear layer and a Tanh activation function.
71
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
72
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
73
+ shape `(batch_size, sequence_length, hidden_size)`.
74
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
75
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
76
+ shape `(batch_size, sequence_length, hidden_size)`.
77
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
78
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
79
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
80
+ the self-attention heads.
81
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
82
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
83
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
84
+ the self-attention heads.
85
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
86
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
87
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
88
+ the self-attention heads.
89
+ """
90
+
91
+ language_output: Optional[torch.FloatTensor] = None
92
+ vision_output: Optional[torch.FloatTensor] = None
93
+ pooled_output: Optional[torch.FloatTensor] = None
94
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
95
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
96
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
97
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
98
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
99
+
100
+
101
+ @dataclass
102
+ class LxmertForQuestionAnsweringOutput(ModelOutput):
103
+ """
104
+ Output type of [`LxmertForQuestionAnswering`].
105
+
106
+ Args:
107
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
108
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
109
+ (classification) loss.
110
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
111
+ Prediction scores of question answering objective (classification).
112
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
113
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
114
+ shape `(batch_size, sequence_length, hidden_size)`.
115
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
116
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
117
+ shape `(batch_size, sequence_length, hidden_size)`.
118
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
119
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
120
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
121
+ the self-attention heads.
122
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
123
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
124
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
125
+ the self-attention heads.
126
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
127
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
128
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
129
+ the self-attention heads.
130
+ """
131
+
132
+ loss: Optional[torch.FloatTensor] = None
133
+ question_answering_score: Optional[torch.FloatTensor] = None
134
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
135
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
136
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
137
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
138
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
139
+
140
+
141
+ @dataclass
142
+ class LxmertForPreTrainingOutput(ModelOutput):
143
+ """
144
+ Output type of [`LxmertForPreTraining`].
145
+
146
+ Args:
147
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
148
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
149
+ (classification) loss.
150
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
151
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
152
+ cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
153
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
154
+ continuation before SoftMax).
155
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
156
+ Prediction scores of question answering objective (classification).
157
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
158
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
159
+ shape `(batch_size, sequence_length, hidden_size)`.
160
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
161
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
162
+ shape `(batch_size, sequence_length, hidden_size)`.
163
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
164
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
165
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
166
+ the self-attention heads.
167
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
168
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
169
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
170
+ the self-attention heads.
171
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
172
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
173
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
174
+ the self-attention heads.
175
+
176
+ """
177
+
178
+ loss: Optional[torch.FloatTensor] = None
179
+ prediction_logits: Optional[torch.FloatTensor] = None
180
+ cross_relationship_score: Optional[torch.FloatTensor] = None
181
+ question_answering_score: Optional[torch.FloatTensor] = None
182
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
183
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
184
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
185
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
186
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
187
+
188
+
189
+ def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
190
+ """Load tf checkpoints in a pytorch model."""
191
+ try:
192
+ import re
193
+
194
+ import numpy as np
195
+ import tensorflow as tf
196
+ except ImportError:
197
+ logger.error(
198
+ "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
199
+ "https://www.tensorflow.org/install/ for installation instructions."
200
+ )
201
+ raise
202
+ tf_path = os.path.abspath(tf_checkpoint_path)
203
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
204
+ # Load weights from TF model
205
+ init_vars = tf.train.list_variables(tf_path)
206
+ names = []
207
+ arrays = []
208
+ for name, shape in init_vars:
209
+ logger.info(f"Loading TF weight {name} with shape {shape}")
210
+ array = tf.train.load_variable(tf_path, name)
211
+ names.append(name)
212
+ arrays.append(array)
213
+
214
+ for name, array in zip(names, arrays):
215
+ name = name.split("/")
216
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
217
+ # which are not required for using pretrained model
218
+ if any(
219
+ n
220
+ in [
221
+ "adam_v",
222
+ "adam_m",
223
+ "AdamWeightDecayOptimizer",
224
+ "AdamWeightDecayOptimizer_1",
225
+ "global_step",
226
+ ]
227
+ for n in name
228
+ ):
229
+ logger.info(f"Skipping {'/'.join(name)}")
230
+ continue
231
+ pointer = model
232
+ for m_name in name:
233
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
234
+ scope_names = re.split(r"_(\d+)", m_name)
235
+ else:
236
+ scope_names = [m_name]
237
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
238
+ pointer = getattr(pointer, "weight")
239
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
240
+ pointer = getattr(pointer, "bias")
241
+ elif scope_names[0] == "output_weights":
242
+ pointer = getattr(pointer, "weight")
243
+ elif scope_names[0] == "squad":
244
+ pointer = getattr(pointer, "classifier")
245
+ else:
246
+ try:
247
+ pointer = getattr(pointer, scope_names[0])
248
+ except AttributeError:
249
+ logger.info(f"Skipping {'/'.join(name)}")
250
+ continue
251
+ if len(scope_names) >= 2:
252
+ num = int(scope_names[1])
253
+ pointer = pointer[num]
254
+ if m_name[-11:] == "_embeddings":
255
+ pointer = getattr(pointer, "weight")
256
+ elif m_name == "kernel":
257
+ array = np.transpose(array)
258
+ try:
259
+ assert pointer.shape == array.shape
260
+ except AssertionError as e:
261
+ e.args += (pointer.shape, array.shape)
262
+ raise
263
+ logger.info(f"Initialize PyTorch weight {name}")
264
+ pointer.data = torch.from_numpy(array)
265
+ return model
266
+
267
+
268
+ class LxmertEmbeddings(nn.Module):
269
+ """Construct the embeddings from word, position and token_type embeddings."""
270
+
271
+ def __init__(self, config):
272
+ super().__init__()
273
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
274
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
275
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
276
+
277
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
278
+ # any TensorFlow checkpoint file
279
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
280
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
281
+
282
+ def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
283
+ if input_ids is not None:
284
+ input_shape = input_ids.size()
285
+ device = input_ids.device
286
+ else:
287
+ input_shape = inputs_embeds.size()[:-1]
288
+ device = inputs_embeds.device
289
+ seq_length = input_shape[1]
290
+
291
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
292
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
293
+
294
+ if token_type_ids is None:
295
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)  # use the device resolved above; this module defines no position_ids buffer
296
+
297
+ if inputs_embeds is None:
298
+ inputs_embeds = self.word_embeddings(input_ids)
299
+ position_embeddings = self.position_embeddings(position_ids)
300
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
301
+
302
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
303
+ embeddings = self.LayerNorm(embeddings)
304
+ embeddings = self.dropout(embeddings)
305
+ return embeddings
306
+
307
+
308
+ class LxmertAttention(nn.Module):
309
+ def __init__(self, config, ctx_dim=None):
310
+ super().__init__()
311
+ if config.hidden_size % config.num_attention_heads != 0:
312
+ raise ValueError(
313
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
314
+ f"heads ({config.num_attention_heads})"
315
+ )
316
+ self.num_attention_heads = config.num_attention_heads
317
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
318
+ self.head_size = self.num_attention_heads * self.attention_head_size
319
+
320
+ # visual_dim = 2048
321
+ if ctx_dim is None:
322
+ ctx_dim = config.hidden_size
323
+ self.query = nn.Linear(config.hidden_size, self.head_size)
324
+ self.key = nn.Linear(ctx_dim, self.head_size)
325
+ self.value = nn.Linear(ctx_dim, self.head_size)
326
+
327
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
328
+
329
+ def transpose_for_scores(self, x):
330
+ new_x_shape = x.size()[:-1] + (
331
+ self.num_attention_heads,
332
+ self.attention_head_size,
333
+ )
334
+ x = x.view(new_x_shape)
335
+ return x.permute(0, 2, 1, 3)
336
+
337
+ def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
338
+ mixed_query_layer = self.query(hidden_states)
339
+ mixed_key_layer = self.key(context)
340
+ mixed_value_layer = self.value(context)
341
+
342
+ query_layer = self.transpose_for_scores(mixed_query_layer)
343
+ key_layer = self.transpose_for_scores(mixed_key_layer)
344
+ value_layer = self.transpose_for_scores(mixed_value_layer)
345
+
346
+ # Take the dot product between "query" and "key" to get the raw attention scores.
347
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
348
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
349
+ # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
350
+ if attention_mask is not None:
351
+ attention_scores = attention_scores + attention_mask
352
+
353
+ # Normalize the attention scores to probabilities.
354
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
355
+
356
+ # This is actually dropping out entire tokens to attend to, which might
357
+ # seem a bit unusual, but is taken from the original Transformer paper.
358
+ attention_probs = self.dropout(attention_probs)
359
+
360
+ context_layer = torch.matmul(attention_probs, value_layer)
361
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
362
+ new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
363
+ context_layer = context_layer.view(new_context_layer_shape)
364
+
365
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
366
+ return outputs
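# Shape note (illustrative only, assuming the default hidden_size=768 and num_attention_heads=12):
# transpose_for_scores reshapes (batch, seq_len, 768) into (batch, 12, seq_len, 64), so the
# attention_scores above have shape (batch, 12, query_len, key_len) before the softmax.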
367
+
368
+
369
+ class LxmertAttentionOutput(nn.Module):
370
+ def __init__(self, config):
371
+ super().__init__()
372
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
373
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
374
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
375
+
376
+ def forward(self, hidden_states, input_tensor):
377
+ hidden_states = self.dense(hidden_states)
378
+ hidden_states = self.dropout(hidden_states)
379
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
380
+ return hidden_states
381
+
382
+
383
+ class LxmertCrossAttentionLayer(nn.Module):
384
+ def __init__(self, config):
385
+ super().__init__()
386
+ self.att = LxmertAttention(config)
387
+ self.output = LxmertAttentionOutput(config)
388
+
389
+ def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
390
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
391
+ if output_attentions:
392
+ attention_probs = output[1]
393
+ attention_output = self.output(output[0], input_tensor)
394
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
395
+ return outputs
396
+
397
+
398
+ class LxmertSelfAttentionLayer(nn.Module):
399
+ def __init__(self, config):
400
+ super().__init__()
401
+ self.self = LxmertAttention(config)
402
+ self.output = LxmertAttentionOutput(config)
403
+
404
+ def forward(self, input_tensor, attention_mask, output_attentions=False):
405
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
406
+ output = self.self(
407
+ input_tensor,
408
+ input_tensor,
409
+ attention_mask,
410
+ output_attentions=output_attentions,
411
+ )
412
+ if output_attentions:
413
+ attention_probs = output[1]
414
+ attention_output = self.output(output[0], input_tensor)
415
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
416
+ return outputs
417
+
418
+
419
+ class LxmertIntermediate(nn.Module):
420
+ def __init__(self, config):
421
+ super().__init__()
422
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
423
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
424
+
425
+ def forward(self, hidden_states):
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.intermediate_act_fn(hidden_states)
428
+ return hidden_states
429
+
430
+
431
+ class LxmertOutput(nn.Module):
432
+ def __init__(self, config):
433
+ super().__init__()
434
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
435
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
436
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
437
+
438
+ def forward(self, hidden_states, input_tensor):
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+
445
+ class LxmertLayer(nn.Module):
446
+ def __init__(self, config):
447
+ super().__init__()
448
+ self.attention = LxmertSelfAttentionLayer(config)
449
+ self.intermediate = LxmertIntermediate(config)
450
+ self.output = LxmertOutput(config)
451
+
452
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
453
+ outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
454
+ attention_output = outputs[0]
455
+ intermediate_output = self.intermediate(attention_output)
456
+ layer_output = self.output(intermediate_output, attention_output)
457
+ outputs = (layer_output,) + outputs[1:] # add attentions if we output them
458
+ return outputs
459
+
460
+
461
+ class LxmertXLayer(nn.Module):
462
+ def __init__(self, config):
463
+ super().__init__()
464
+ # The cross-attention Layer
465
+ self.visual_attention = LxmertCrossAttentionLayer(config)
466
+
467
+ # Self-attention Layers
468
+ self.lang_self_att = LxmertSelfAttentionLayer(config)
469
+ self.visn_self_att = LxmertSelfAttentionLayer(config)
470
+
471
+ # Intermediate and Output Layers (FFNs)
472
+ self.lang_inter = LxmertIntermediate(config)
473
+ self.lang_output = LxmertOutput(config)
474
+ self.visn_inter = LxmertIntermediate(config)
475
+ self.visn_output = LxmertOutput(config)
476
+
477
+ def cross_att(
478
+ self,
479
+ lang_input,
480
+ lang_attention_mask,
481
+ visual_input,
482
+ visual_attention_mask,
483
+ output_x_attentions=False,
484
+ ):
485
+ # Cross Attention
486
+ lang_att_output = self.visual_attention(
487
+ lang_input,
488
+ visual_input,
489
+ ctx_att_mask=visual_attention_mask,
490
+ output_attentions=output_x_attentions,
491
+ )
492
+ visual_att_output = self.visual_attention(
493
+ visual_input,
494
+ lang_input,
495
+ ctx_att_mask=lang_attention_mask,
496
+ output_attentions=False,
497
+ )
498
+ return lang_att_output, visual_att_output
499
+
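+    # Note that `cross_att` reuses the same `self.visual_attention` module for both directions: it is called once
+    # with (language queries, visual context) and once with (visual queries, language context), so the
+    # language-to-vision and vision-to-language cross-attention share parameters.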
500
+ def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
501
+ # Self Attention
502
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
503
+ visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
504
+ return lang_att_output[0], visual_att_output[0]
505
+
506
+ def output_fc(self, lang_input, visual_input):
507
+ # FC layers
508
+ lang_inter_output = self.lang_inter(lang_input)
509
+ visual_inter_output = self.visn_inter(visual_input)
510
+
511
+ # Layer output
512
+ lang_output = self.lang_output(lang_inter_output, lang_input)
513
+ visual_output = self.visn_output(visual_inter_output, visual_input)
514
+
515
+ return lang_output, visual_output
516
+
517
+ def forward(
518
+ self,
519
+ lang_feats,
520
+ lang_attention_mask,
521
+ visual_feats,
522
+ visual_attention_mask,
523
+ output_attentions=False,
524
+ ):
525
+ lang_att_output, visual_att_output = self.cross_att(
526
+ lang_input=lang_feats,
527
+ lang_attention_mask=lang_attention_mask,
528
+ visual_input=visual_feats,
529
+ visual_attention_mask=visual_attention_mask,
530
+ output_x_attentions=output_attentions,
531
+ )
532
+ attention_probs = lang_att_output[1:]
533
+ lang_att_output, visual_att_output = self.self_att(
534
+ lang_att_output[0],
535
+ lang_attention_mask,
536
+ visual_att_output[0],
537
+ visual_attention_mask,
538
+ )
539
+
540
+ lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
541
+ return (
542
+ (
543
+ lang_output,
544
+ visual_output,
545
+ attention_probs[0],
546
+ )
547
+ if output_attentions
548
+ else (lang_output, visual_output)
549
+ )
550
+
551
+
552
+ class LxmertVisualFeatureEncoder(nn.Module):
553
+ def __init__(self, config):
554
+ super().__init__()
555
+ feat_dim = config.visual_feat_dim
556
+ pos_dim = config.visual_pos_dim
557
+
558
+ # Object feature encoding
559
+ self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
560
+ self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
561
+
562
+ # Box position encoding
563
+ self.box_fc = nn.Linear(pos_dim, config.hidden_size)
564
+ self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
565
+
566
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
567
+
568
+ def forward(self, visual_feats, visual_pos):
569
+ x = self.visn_fc(visual_feats)
570
+ x = self.visn_layer_norm(x)
571
+ y = self.box_fc(visual_pos)
572
+ y = self.box_layer_norm(y)
573
+ output = (x + y) / 2
574
+
575
+ output = self.dropout(output)
576
+ return output
577
+
578
+
579
+ class LxmertEncoder(nn.Module):
580
+ def __init__(self, config):
581
+ super().__init__()
582
+
583
+ # Obj-level image embedding layer
584
+ self.visn_fc = LxmertVisualFeatureEncoder(config)
585
+ self.config = config
586
+
587
+ # Number of layers
588
+ self.num_l_layers = config.l_layers
589
+ self.num_x_layers = config.x_layers
590
+ self.num_r_layers = config.r_layers
591
+
592
+ # Layers
593
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
594
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
595
+ self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
596
+ self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
597
+
598
+ def forward(
599
+ self,
600
+ lang_feats,
601
+ lang_attention_mask,
602
+ visual_feats,
603
+ visual_pos,
604
+ visual_attention_mask=None,
605
+ output_attentions=None,
606
+ ):
607
+ vision_hidden_states = ()
608
+ language_hidden_states = ()
609
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
610
+ language_attentions = () if output_attentions or self.config.output_attentions else None
611
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
612
+
613
+ visual_feats = self.visn_fc(visual_feats, visual_pos)
614
+
615
+ # Run language layers
616
+ for layer_module in self.layer:
617
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
618
+ lang_feats = l_outputs[0]
619
+ language_hidden_states = language_hidden_states + (lang_feats,)
620
+ if language_attentions is not None:
621
+ language_attentions = language_attentions + (l_outputs[1],)
622
+
623
+ # Run relational layers
624
+ for layer_module in self.r_layers:
625
+ v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
626
+ visual_feats = v_outputs[0]
627
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
628
+ if vision_attentions is not None:
629
+ vision_attentions = vision_attentions + (v_outputs[1],)
630
+
631
+ # Run cross-modality layers
632
+ for layer_module in self.x_layers:
633
+ x_outputs = layer_module(
634
+ lang_feats,
635
+ lang_attention_mask,
636
+ visual_feats,
637
+ visual_attention_mask,
638
+ output_attentions=output_attentions,
639
+ )
640
+ lang_feats, visual_feats = x_outputs[:2]
641
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
642
+ language_hidden_states = language_hidden_states + (lang_feats,)
643
+ if cross_encoder_attentions is not None:
644
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
645
+ visual_encoder_outputs = (
646
+ vision_hidden_states,
647
+ vision_attentions if output_attentions else None,
648
+ )
649
+ lang_encoder_outputs = (
650
+ language_hidden_states,
651
+ language_attentions if output_attentions else None,
652
+ )
653
+ return (
654
+ visual_encoder_outputs,
655
+ lang_encoder_outputs,
656
+ cross_encoder_attentions if output_attentions else None,
657
+ )
658
+
659
+
660
+ class LxmertPooler(nn.Module):
661
+ def __init__(self, config):
662
+ super(LxmertPooler, self).__init__()
663
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
664
+ self.activation = nn.Tanh()
665
+
666
+ def forward(self, hidden_states):
667
+ # We "pool" the model by simply taking the hidden state corresponding
668
+ # to the first token.
669
+ first_token_tensor = hidden_states[:, 0]
670
+ pooled_output = self.dense(first_token_tensor)
671
+ pooled_output = self.activation(pooled_output)
672
+ return pooled_output
673
+
674
+
675
+ class LxmertPredictionHeadTransform(nn.Module):
676
+ def __init__(self, config):
677
+ super(LxmertPredictionHeadTransform, self).__init__()
678
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
679
+ self.transform_act_fn = ACT2FN[config.hidden_act]
680
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
681
+
682
+ def forward(self, hidden_states):
683
+ hidden_states = self.dense(hidden_states)
684
+ hidden_states = self.transform_act_fn(hidden_states)
685
+ hidden_states = self.LayerNorm(hidden_states)
686
+ return hidden_states
687
+
688
+
689
+ class LxmertLMPredictionHead(nn.Module):
690
+ def __init__(self, config, lxmert_model_embedding_weights):
691
+ super(LxmertLMPredictionHead, self).__init__()
692
+ self.transform = LxmertPredictionHeadTransform(config)
693
+
694
+ # The output weights are the same as the input embeddings, but there is
695
+ # an output-only bias for each token.
696
+ self.decoder = nn.Linear(
697
+ lxmert_model_embedding_weights.size(1),
698
+ lxmert_model_embedding_weights.size(0),
699
+ bias=False,
700
+ )
701
+ self.decoder.weight = lxmert_model_embedding_weights
702
+ self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
703
+
704
+ def forward(self, hidden_states):
705
+ hidden_states = self.transform(hidden_states)
706
+ hidden_states = self.decoder(hidden_states) + self.bias
707
+ return hidden_states
708
+
709
+
710
+ class LxmertVisualAnswerHead(nn.Module):
711
+ def __init__(self, config, num_labels):
712
+ super().__init__()
713
+ hid_dim = config.hidden_size
714
+ self.logit_fc = nn.Sequential(
715
+ nn.Linear(hid_dim, hid_dim * 2),
716
+ GeLU(),
717
+ nn.LayerNorm(hid_dim * 2, eps=1e-12),
718
+ nn.Linear(hid_dim * 2, num_labels),
719
+ )
720
+
721
+ def forward(self, hidden_states):
722
+ return self.logit_fc(hidden_states)
723
+
724
+
725
+ class LxmertVisualObjHead(nn.Module):
726
+ def __init__(self, config):
727
+ super().__init__()
728
+ self.transform = LxmertPredictionHeadTransform(config)
729
+ # Decide the use of visual losses
730
+ visual_losses = {}
731
+ if config.visual_obj_loss:
732
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
733
+ if config.visual_attr_loss:
734
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
735
+ if config.visual_feat_loss:
736
+ visual_losses["feat"] = {
737
+ "shape": (-1, config.visual_feat_dim),
738
+ "num": config.visual_feat_dim,
739
+ }
740
+ self.visual_losses = visual_losses
741
+
742
+        # A separate linear decoder per visual loss, mapping the transformed hidden states
743
+        # to that loss's prediction space.
744
+ self.decoder_dict = nn.ModuleDict(
745
+ {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
746
+ )
747
+
748
+ def forward(self, hidden_states):
749
+ hidden_states = self.transform(hidden_states)
750
+ output = {}
751
+ for key in self.visual_losses:
752
+ output[key] = self.decoder_dict[key](hidden_states)
753
+ return output
754
+
755
+
756
+ class LxmertPreTrainingHeads(nn.Module):
757
+ def __init__(self, config, lxmert_model_embedding_weights):
758
+ super(LxmertPreTrainingHeads, self).__init__()
759
+ self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
760
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
761
+
762
+ def forward(self, sequence_output, pooled_output):
763
+ prediction_scores = self.predictions(sequence_output)
764
+ seq_relationship_score = self.seq_relationship(pooled_output)
765
+ return prediction_scores, seq_relationship_score
766
+
767
+
768
+ class LxmertPreTrainedModel(PreTrainedModel):
769
+ """
770
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
771
+ models.
772
+ """
773
+
774
+ config_class = LxmertConfig
775
+ load_tf_weights = load_tf_weights_in_lxmert
776
+ base_model_prefix = "lxmert"
777
+
778
+ def _init_weights(self, module):
779
+ """Initialize the weights"""
780
+ if isinstance(module, nn.Linear):
781
+ # Slightly different from the TF version which uses truncated_normal for initialization
782
+ # cf https://github.com/pytorch/pytorch/pull/5617
783
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
784
+ if module.bias is not None:
785
+ module.bias.data.zero_()
786
+ elif isinstance(module, nn.Embedding):
787
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
788
+ if module.padding_idx is not None:
789
+ module.weight.data[module.padding_idx].zero_()
790
+ elif isinstance(module, nn.LayerNorm):
791
+ module.bias.data.zero_()
792
+ module.weight.data.fill_(1.0)
793
+
794
+
795
+ LXMERT_START_DOCSTRING = r"""
796
+
797
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
798
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
799
+ model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
800
+ Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss
801
+ for question answering attribute prediction, and object tag prediction.
802
+
803
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
804
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
805
+ etc.)
806
+
807
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
808
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
809
+ and behavior.
810
+
811
+ Parameters:
812
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
813
+ Initializing with a config file does not load the weights associated with the model, only the
814
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
815
+ """
816
+
817
+ LXMERT_INPUTS_DOCSTRING = r"""
818
+
819
+ Args:
820
+ input_ids (`torch.LongTensor` of shape `({0})`):
821
+ Indices of input sequence tokens in the vocabulary.
822
+
823
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
824
+ [`PreTrainedTokenizer.__call__`] for details.
825
+
826
+ [What are input IDs?](../glossary#input-ids)
827
+ visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
828
+            This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
829
+            Faster R-CNN model.
830
+
831
+ These are currently not provided by the transformers library.
832
+ visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
833
+            This input represents spatial features corresponding (by index) to the visual features. The
834
+            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
835
+            1.
836
+
837
+ These are currently not provided by the transformers library.
838
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
839
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
840
+
841
+ - 1 for tokens that are **not masked**,
842
+ - 0 for tokens that are **masked**.
843
+
844
+ [What are attention masks?](../glossary#attention-mask)
845
+ visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
846
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
847
+
848
+ - 1 for tokens that are **not masked**,
849
+ - 0 for tokens that are **masked**.
850
+
851
+ [What are attention masks?](../glossary#attention-mask)
852
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
853
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
854
+ 1]`:
855
+
856
+ - 0 corresponds to a *sentence A* token,
857
+ - 1 corresponds to a *sentence B* token.
858
+
859
+ [What are token type IDs?](../glossary#token-type-ids)
860
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
861
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
862
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
863
+ model's internal embedding lookup matrix.
864
+ output_attentions (`bool`, *optional*):
865
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
866
+ tensors for more detail.
867
+ output_hidden_states (`bool`, *optional*):
868
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
869
+ more detail.
870
+ return_dict (`bool`, *optional*):
871
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
872
+ """
873
+
874
+
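+ # The `visual_feats`/`visual_pos` inputs documented above must come from an external detector (typically a
+ # Faster R-CNN feature extractor). A minimal sketch with random placeholder features, assuming 36 regions and the
+ # default `visual_feat_dim`/`visual_pos_dim` from `LxmertConfig`:
+ #
+ #     import torch
+ #     from transformers import AutoTokenizer, LxmertModel
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #     model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #     inputs = tokenizer("What is on the table?", return_tensors="pt")
+ #     visual_feats = torch.randn(1, 36, model.config.visual_feat_dim)  # placeholder ROI features
+ #     visual_pos = torch.rand(1, 36, model.config.visual_pos_dim)      # normalized box coordinates in [0, 1]
+ #     outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
+ #     pooled = outputs.pooled_output  # (1, hidden_size)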
875
+ @add_start_docstrings(
876
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
877
+ LXMERT_START_DOCSTRING,
878
+ )
879
+ class LxmertModel(LxmertPreTrainedModel):
880
+ def __init__(self, config):
881
+ super().__init__(config)
882
+ self.embeddings = LxmertEmbeddings(config)
883
+ self.encoder = LxmertEncoder(config)
884
+ self.pooler = LxmertPooler(config)
885
+ # Initialize weights and apply final processing
886
+ self.post_init()
887
+
888
+ def get_input_embeddings(self):
889
+ return self.embeddings.word_embeddings
890
+
891
+ def set_input_embeddings(self, new_embeddings):
892
+ self.embeddings.word_embeddings = new_embeddings
893
+
894
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
895
+ @add_code_sample_docstrings(
896
+ checkpoint=_CHECKPOINT_FOR_DOC,
897
+ output_type=LxmertModelOutput,
898
+ config_class=_CONFIG_FOR_DOC,
899
+ )
900
+ def forward(
901
+ self,
902
+ input_ids: Optional[torch.LongTensor] = None,
903
+ visual_feats: Optional[torch.FloatTensor] = None,
904
+ visual_pos: Optional[torch.FloatTensor] = None,
905
+ attention_mask: Optional[torch.FloatTensor] = None,
906
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
907
+ token_type_ids: Optional[torch.LongTensor] = None,
908
+ inputs_embeds: Optional[torch.FloatTensor] = None,
909
+ output_attentions: Optional[bool] = None,
910
+ output_hidden_states: Optional[bool] = None,
911
+ return_dict: Optional[bool] = None,
912
+ ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]:
913
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
914
+ output_hidden_states = (
915
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
916
+ )
917
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
918
+
919
+ if input_ids is not None and inputs_embeds is not None:
920
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
921
+ elif input_ids is not None:
922
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
923
+ input_shape = input_ids.size()
924
+ elif inputs_embeds is not None:
925
+ input_shape = inputs_embeds.size()[:-1]
926
+ else:
927
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
928
+
929
+ if visual_feats is None:
930
+ raise ValueError("`visual_feats` cannot be `None`")
931
+ if visual_pos is None:
932
+ raise ValueError("`visual_pos` cannot be `None`")
933
+
934
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
935
+
936
+ if attention_mask is None:
937
+ attention_mask = torch.ones(input_shape, device=device)
938
+ if token_type_ids is None:
939
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
940
+
941
+ # We create a 3D attention mask from a 2D tensor mask.
942
+ # Sizes are [batch_size, 1, 1, to_seq_length]
943
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
944
+ # this attention mask is more simple than the triangular masking of causal attention
945
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
946
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
947
+
948
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
949
+ # masked positions, this operation will create a tensor which is 0.0 for
950
+ # positions we want to attend and the dtype's smallest value for masked positions.
951
+ # Since we are adding it to the raw scores before the softmax, this is
952
+ # effectively the same as removing these entirely.
953
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
954
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
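+        # For example, a mask row `[1, 1, 0]` becomes `[0.0, 0.0, torch.finfo(self.dtype).min]` here, which, added to
+        # the raw attention scores, leaves real tokens untouched and drives masked positions to ~zero after softmax.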
955
+
956
+ # Process the visual attention mask
957
+ if visual_attention_mask is not None:
958
+ extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
959
+ extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
960
+ extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
961
+ else:
962
+ extended_visual_attention_mask = None
963
+
964
+ # Positional Word Embeddings
965
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
966
+
967
+ # Run Lxmert encoder
968
+ encoder_outputs = self.encoder(
969
+ embedding_output,
970
+ extended_attention_mask,
971
+ visual_feats=visual_feats,
972
+ visual_pos=visual_pos,
973
+ visual_attention_mask=extended_visual_attention_mask,
974
+ output_attentions=output_attentions,
975
+ )
976
+
977
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
978
+ vision_hidden_states = visual_encoder_outputs[0]
979
+ language_hidden_states = lang_encoder_outputs[0]
980
+
981
+ all_attentions = ()
982
+ if output_attentions:
983
+ language_attentions = lang_encoder_outputs[1]
984
+ vision_attentions = visual_encoder_outputs[1]
985
+ cross_encoder_attentions = encoder_outputs[2]
986
+ all_attentions = (
987
+ language_attentions,
988
+ vision_attentions,
989
+ cross_encoder_attentions,
990
+ )
991
+
992
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
993
+
994
+ visual_output = vision_hidden_states[-1]
995
+ lang_output = language_hidden_states[-1]
996
+ pooled_output = self.pooler(lang_output)
997
+
998
+ if not return_dict:
999
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
1000
+
1001
+ return LxmertModelOutput(
1002
+ pooled_output=pooled_output,
1003
+ language_output=lang_output,
1004
+ vision_output=visual_output,
1005
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
1006
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
1007
+ language_attentions=language_attentions if output_attentions else None,
1008
+ vision_attentions=vision_attentions if output_attentions else None,
1009
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
1010
+ )
1011
+
1012
+
1013
+ @add_start_docstrings(
1014
+ """Lxmert Model with a specified pretraining head on top.""",
1015
+ LXMERT_START_DOCSTRING,
1016
+ )
1017
+ class LxmertForPreTraining(LxmertPreTrainedModel):
1018
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
1019
+
1020
+ def __init__(self, config):
1021
+ super().__init__(config)
1022
+ # Configuration
1023
+ self.config = config
1024
+ self.num_qa_labels = config.num_qa_labels
1025
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1026
+
1027
+ # Use of pretraining tasks
1028
+ self.task_mask_lm = config.task_mask_lm
1029
+ self.task_obj_predict = config.task_obj_predict
1030
+ self.task_matched = config.task_matched
1031
+ self.task_qa = config.task_qa
1032
+
1033
+ # Lxmert backbone
1034
+ self.lxmert = LxmertModel(config)
1035
+
1036
+ # Pre-training heads
1037
+ self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
1038
+ if self.task_obj_predict:
1039
+ self.obj_predict_head = LxmertVisualObjHead(config)
1040
+ if self.task_qa:
1041
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1042
+
1043
+ # Weight initialization
1044
+ # Initialize weights and apply final processing
1045
+ self.post_init()
1046
+
1047
+ # Loss functions
1048
+ self.loss_fcts = {
1049
+ "l2": SmoothL1Loss(reduction="none"),
1050
+ "visual_ce": CrossEntropyLoss(reduction="none"),
1051
+ "ce": CrossEntropyLoss(),
1052
+ }
1053
+
1054
+ visual_losses = {}
1055
+ if config.visual_obj_loss:
1056
+ visual_losses["obj"] = {
1057
+ "shape": (-1,),
1058
+ "num": config.num_object_labels,
1059
+ "loss": "visual_ce",
1060
+ }
1061
+ if config.visual_attr_loss:
1062
+ visual_losses["attr"] = {
1063
+ "shape": (-1,),
1064
+ "num": config.num_attr_labels,
1065
+ "loss": "visual_ce",
1066
+ }
1067
+ if config.visual_feat_loss:
1068
+ visual_losses["feat"] = {
1069
+ "shape": (-1, config.visual_feat_dim),
1070
+ "num": config.visual_feat_dim,
1071
+ "loss": "l2",
1072
+ }
1073
+ self.visual_losses = visual_losses
1074
+
1075
+ def resize_num_qa_labels(self, num_labels):
1076
+ """
1077
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1078
+        will add newly initialized weights. Reducing the size will remove weights from the end.
1079
+
1080
+ Args:
1081
+ num_labels (`int`, *optional*):
1082
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1083
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1084
+                returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1085
+
1086
+ Return:
1087
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1088
+ """
1089
+
1090
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1091
+ if num_labels is None or cur_qa_logit_layer is None:
1092
+ return
1093
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1094
+ self.config.num_qa_labels = num_labels
1095
+ self.num_qa_labels = num_labels
1096
+
1097
+ return new_qa_logit_layer
1098
+
1099
+ def _resize_qa_labels(self, num_labels):
1100
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1101
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1102
+ self._set_qa_logit_layer(new_qa_logit_layer)
1103
+ return self.get_qa_logit_layer()
1104
+
1105
+ def get_qa_logit_layer(self) -> nn.Module:
1106
+ """
1107
+ Returns the linear layer that produces question answering logits.
1108
+
1109
+ Returns:
1110
+ `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
1111
+ does not have a visual answering head.
1112
+ """
1113
+ if hasattr(self, "answer_head"):
1114
+ return self.answer_head.logit_fc[-1]
1115
+
1116
+ def _set_qa_logit_layer(self, qa_logit_layer):
1117
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1118
+
1119
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1120
+ if num_labels is None:
1121
+ return cur_qa_logit_layer
1122
+
1123
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1124
+ if cur_qa_labels == num_labels:
1125
+ return cur_qa_logit_layer
1126
+
1127
+ # Build new linear output
1128
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1129
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1130
+ else:
1131
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1132
+
1133
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1134
+
1135
+ # initialize all new labels
1136
+ self._init_weights(new_qa_logit_layer)
1137
+
1138
+ # Copy labels from the previous weights
1139
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1140
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1141
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1142
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1143
+
1144
+ return new_qa_logit_layer
1145
+
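+    # Minimal sketch of adapting the QA head to a new answer vocabulary, assuming the checkpoint was pretrained
+    # with the QA head enabled (`task_qa=True`); the label count below is illustrative only:
+    #
+    #     model = LxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")
+    #     model.resize_num_qa_labels(3129)  # rebuilds the last Linear of `answer_head.logit_fc`
+    #     assert model.config.num_qa_labels == 3129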
1146
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1147
+ @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1148
+ def forward(
1149
+ self,
1150
+ input_ids: Optional[torch.LongTensor] = None,
1151
+ visual_feats: Optional[torch.FloatTensor] = None,
1152
+ visual_pos: Optional[torch.FloatTensor] = None,
1153
+ attention_mask: Optional[torch.FloatTensor] = None,
1154
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1155
+ token_type_ids: Optional[torch.LongTensor] = None,
1156
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1157
+ labels: Optional[torch.LongTensor] = None,
1158
+ obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
1159
+ matched_label: Optional[torch.LongTensor] = None,
1160
+ ans: Optional[torch.Tensor] = None,
1161
+ output_attentions: Optional[bool] = None,
1162
+ output_hidden_states: Optional[bool] = None,
1163
+ return_dict: Optional[bool] = None,
1164
+ **kwargs,
1165
+ ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]:
1166
+ r"""
1167
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1168
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1169
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1170
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1171
+        obj_labels (`Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
1172
+            Each key is named after one of the visual losses; the elements of each tuple are the label ids and the
1173
+            label scores, of shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)`
1174
+            respectively.
1175
+ matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1176
+            Labels for the text-image matching (classification) loss, i.e. whether or not the text input matches the
1177
+            image. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
1178
+
1179
+ - 0 indicates that the sentence does not match the image,
1180
+ - 1 indicates that the sentence does match the image.
1181
+        ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
1182
+            A one-hot representation of the correct answer.
1183
+
1184
+ Returns:
1185
+ """
1186
+
1187
+ if "masked_lm_labels" in kwargs:
1188
+ warnings.warn(
1189
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
1190
+ " instead.",
1191
+ FutureWarning,
1192
+ )
1193
+ labels = kwargs.pop("masked_lm_labels")
1194
+
1195
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1196
+
1197
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1198
+ lxmert_output = self.lxmert(
1199
+ input_ids=input_ids,
1200
+ visual_feats=visual_feats,
1201
+ visual_pos=visual_pos,
1202
+ token_type_ids=token_type_ids,
1203
+ attention_mask=attention_mask,
1204
+ visual_attention_mask=visual_attention_mask,
1205
+ inputs_embeds=inputs_embeds,
1206
+ output_hidden_states=output_hidden_states,
1207
+ output_attentions=output_attentions,
1208
+ return_dict=return_dict,
1209
+ )
1210
+
1211
+ lang_output, visual_output, pooled_output = (
1212
+ lxmert_output[0],
1213
+ lxmert_output[1],
1214
+ lxmert_output[2],
1215
+ )
1216
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1217
+ if self.task_qa:
1218
+ answer_score = self.answer_head(pooled_output)
1219
+ else:
1220
+ answer_score = pooled_output[0][0]
1221
+
1222
+ total_loss = (
1223
+ None
1224
+ if (labels is None and matched_label is None and obj_labels is None and ans is None)
1225
+ else torch.tensor(0.0, device=device)
1226
+ )
1227
+ if labels is not None and self.task_mask_lm:
1228
+ masked_lm_loss = self.loss_fcts["ce"](
1229
+ lang_prediction_scores.view(-1, self.config.vocab_size),
1230
+ labels.view(-1),
1231
+ )
1232
+ total_loss += masked_lm_loss
1233
+ if matched_label is not None and self.task_matched:
1234
+ matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
1235
+ total_loss += matched_loss
1236
+ if obj_labels is not None and self.task_obj_predict:
1237
+ total_visual_loss = torch.tensor(0.0, device=input_ids.device)
1238
+ visual_prediction_scores_dict = self.obj_predict_head(visual_output)
1239
+ for key, key_info in self.visual_losses.items():
1240
+ label, mask_conf = obj_labels[key]
1241
+ output_dim = key_info["num"]
1242
+ loss_fct_name = key_info["loss"]
1243
+ label_shape = key_info["shape"]
1244
+ weight = self.visual_loss_normalizer
1245
+ visual_loss_fct = self.loss_fcts[loss_fct_name]
1246
+ visual_prediction_scores = visual_prediction_scores_dict[key]
1247
+ visual_loss = visual_loss_fct(
1248
+ visual_prediction_scores.view(-1, output_dim),
1249
+ label.view(label_shape),
1250
+ )
1251
+ if visual_loss.dim() > 1: # Regression Losses
1252
+ visual_loss = visual_loss.mean(1)
1253
+ visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
1254
+ total_visual_loss += visual_loss
1255
+ total_loss += total_visual_loss
1256
+ if ans is not None and self.task_qa:
1257
+ answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
1258
+ total_loss += answer_loss
1259
+
1260
+ if not return_dict:
1261
+ output = (
1262
+ lang_prediction_scores,
1263
+ cross_relationship_score,
1264
+ answer_score,
1265
+ ) + lxmert_output[3:]
1266
+ return ((total_loss,) + output) if total_loss is not None else output
1267
+
1268
+ return LxmertForPreTrainingOutput(
1269
+ loss=total_loss,
1270
+ prediction_logits=lang_prediction_scores,
1271
+ cross_relationship_score=cross_relationship_score,
1272
+ question_answering_score=answer_score,
1273
+ language_hidden_states=lxmert_output.language_hidden_states,
1274
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1275
+ language_attentions=lxmert_output.language_attentions,
1276
+ vision_attentions=lxmert_output.vision_attentions,
1277
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1278
+ )
1279
+
1280
+
1281
+ @add_start_docstrings(
1282
+ """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
1283
+ LXMERT_START_DOCSTRING,
1284
+ )
1285
+ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
1286
+ def __init__(self, config):
1287
+ super().__init__(config)
1288
+ # Configuration
1289
+ self.config = config
1290
+ self.num_qa_labels = config.num_qa_labels
1291
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1292
+
1293
+ # Lxmert backbone
1294
+ self.lxmert = LxmertModel(config)
1295
+
1296
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1297
+
1298
+ # Weight initialization
1299
+ # Initialize weights and apply final processing
1300
+ self.post_init()
1301
+
1302
+ # Loss function
1303
+ self.loss = CrossEntropyLoss()
1304
+
1305
+ def resize_num_qa_labels(self, num_labels):
1306
+ """
1307
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
1308
+        will add newly initialized weights. Reducing the size will remove weights from the end.
1309
+
1310
+ Args:
1311
+ num_labels (`int`, *optional*):
1312
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1313
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1314
+                returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
1315
+
1316
+ Return:
1317
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1318
+ """
1319
+
1320
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1321
+ if num_labels is None or cur_qa_logit_layer is None:
1322
+ return
1323
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1324
+ self.config.num_qa_labels = num_labels
1325
+ self.num_qa_labels = num_labels
1326
+
1327
+ return new_qa_logit_layer
1328
+
1329
+ def _resize_qa_labels(self, num_labels):
1330
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1331
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1332
+ self._set_qa_logit_layer(new_qa_logit_layer)
1333
+ return self.get_qa_logit_layer()
1334
+
1335
+ def get_qa_logit_layer(self) -> nn.Module:
1336
+ """
1337
+ Returns the linear layer that produces question answering logits
1338
+
1339
+ Returns:
1340
+ `nn.Module`: A torch module mapping the question answering prediction hidden states. `None`: A NoneType
1341
+ object if Lxmert does not have the visual answering head.
1342
+ """
1343
+
1344
+ if hasattr(self, "answer_head"):
1345
+ return self.answer_head.logit_fc[-1]
1346
+
1347
+ def _set_qa_logit_layer(self, qa_logit_layer):
1348
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1349
+
1350
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1351
+ if num_labels is None:
1352
+ return cur_qa_logit_layer
1353
+
1354
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1355
+ if cur_qa_labels == num_labels:
1356
+ return cur_qa_logit_layer
1357
+
1358
+ # Build new linear output
1359
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1360
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1361
+ else:
1362
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1363
+
1364
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1365
+
1366
+ # initialize all new labels
1367
+ self._init_weights(new_qa_logit_layer)
1368
+
1369
+ # Copy labels from the previous weights
1370
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1371
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1372
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1373
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1374
+
1375
+ return new_qa_logit_layer
1376
+
1377
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1378
+ @add_code_sample_docstrings(
1379
+ checkpoint=_CHECKPOINT_FOR_DOC,
1380
+ output_type=LxmertForQuestionAnsweringOutput,
1381
+ config_class=_CONFIG_FOR_DOC,
1382
+ )
1383
+ def forward(
1384
+ self,
1385
+ input_ids: Optional[torch.LongTensor] = None,
1386
+ visual_feats: Optional[torch.FloatTensor] = None,
1387
+ visual_pos: Optional[torch.FloatTensor] = None,
1388
+ attention_mask: Optional[torch.FloatTensor] = None,
1389
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1390
+ token_type_ids: Optional[torch.LongTensor] = None,
1391
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1392
+ labels: Optional[torch.Tensor] = None,
1393
+ output_attentions: Optional[bool] = None,
1394
+ output_hidden_states: Optional[bool] = None,
1395
+ return_dict: Optional[bool] = None,
1396
+ ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]:
1397
+ r"""
1398
+        labels (`torch.Tensor` of shape `(batch_size)`, *optional*):
1399
+            A one-hot representation of the correct answer.
1400
+ """
1401
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1402
+
1403
+ lxmert_output = self.lxmert(
1404
+ input_ids=input_ids,
1405
+ visual_feats=visual_feats,
1406
+ visual_pos=visual_pos,
1407
+ token_type_ids=token_type_ids,
1408
+ attention_mask=attention_mask,
1409
+ visual_attention_mask=visual_attention_mask,
1410
+ inputs_embeds=inputs_embeds,
1411
+ output_hidden_states=output_hidden_states,
1412
+ output_attentions=output_attentions,
1413
+ return_dict=return_dict,
1414
+ )
1415
+
1416
+ pooled_output = lxmert_output[2]
1417
+ answer_score = self.answer_head(pooled_output)
1418
+ loss = None
1419
+ if labels is not None:
1420
+ loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
1421
+
1422
+ if not return_dict:
1423
+ output = (answer_score,) + lxmert_output[3:]
1424
+ return (loss,) + output if loss is not None else output
1425
+
1426
+ return LxmertForQuestionAnsweringOutput(
1427
+ loss=loss,
1428
+ question_answering_score=answer_score,
1429
+ language_hidden_states=lxmert_output.language_hidden_states,
1430
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1431
+ language_attentions=lxmert_output.language_attentions,
1432
+ vision_attentions=lxmert_output.vision_attentions,
1433
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1434
+ )
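+
+ # End-to-end sketch for the question-answering head; the visual inputs below are random placeholders standing in
+ # for real Faster R-CNN region features (the 36-region count is an assumption, not fixed by the model):
+ #
+ #     import torch
+ #     from transformers import AutoTokenizer, LxmertForQuestionAnswering
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #     model = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-base-uncased")
+ #     inputs = tokenizer("Is there a cat in the picture?", return_tensors="pt")
+ #     visual_feats = torch.randn(1, 36, model.config.visual_feat_dim)
+ #     visual_pos = torch.rand(1, 36, model.config.visual_pos_dim)
+ #     outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
+ #     predicted_answer_id = outputs.question_answering_score.argmax(-1).item()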
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py ADDED
@@ -0,0 +1,1656 @@
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
3
+ # Lxmert Authors.
4
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """ TF 2.0 LXMERT model."""
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from dataclasses import dataclass
24
+ from typing import Dict, Optional, Tuple, Union
25
+
26
+ import numpy as np
27
+ import tensorflow as tf
28
+
29
+ from ...activations_tf import get_tf_activation
30
+ from ...modeling_tf_utils import (
31
+ TFModelInputType,
32
+ TFPreTrainedModel,
33
+ get_initializer,
34
+ keras,
35
+ keras_serializable,
36
+ shape_list,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_lxmert import LxmertConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
54
+ _CONFIG_FOR_DOC = "LxmertConfig"
55
+
56
+
57
+ from ..deprecated._archive_maps import TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ @dataclass
61
+ class TFLxmertModelOutput(ModelOutput):
62
+ """
63
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
64
+    visual, and cross-modality encoders. (Note: the visual encoder in Lxmert is referred to as the "relationship"
65
+    encoder.)
66
+
67
+
68
+ Args:
69
+ language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
70
+ Sequence of hidden-states at the output of the last layer of the language encoder.
71
+ vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
72
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
73
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
74
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
75
+            by a Linear layer and a Tanh activation function.
76
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
77
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
78
+ `(batch_size, sequence_length, hidden_size)`.
79
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
80
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
81
+ `(batch_size, sequence_length, hidden_size)`.
82
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
83
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
84
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
85
+ the self-attention heads.
86
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
87
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
88
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
89
+ the self-attention heads.
90
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
91
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
92
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
93
+ the self-attention heads.
94
+ """
95
+
96
+ language_output: tf.Tensor | None = None
97
+ vision_output: tf.Tensor | None = None
98
+ pooled_output: tf.Tensor | None = None
99
+ language_hidden_states: Tuple[tf.Tensor] | None = None
100
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
101
+ language_attentions: Tuple[tf.Tensor] | None = None
102
+ vision_attentions: Tuple[tf.Tensor] | None = None
103
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
104
+
105
+
106
+ @dataclass
107
+ class TFLxmertForPreTrainingOutput(ModelOutput):
108
+ """
109
+ Output type of [`LxmertForPreTraining`].
110
+
111
+ Args:
112
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
113
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
114
+ (classification) loss.
115
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
116
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
117
+ cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`):
118
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
119
+ continuation before SoftMax).
120
+ question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):
121
+ Prediction scores of question answering objective (classification).
122
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
123
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
124
+ `(batch_size, sequence_length, hidden_size)`.
125
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
126
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
127
+ `(batch_size, sequence_length, hidden_size)`.
128
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
129
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
130
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
131
+ the self-attention heads.
132
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
133
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
134
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
135
+ the self-attention heads.
136
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
137
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
138
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
139
+ the self-attention heads.
140
+
141
+ """
142
+
143
+ loss: tf.Tensor | None = None
144
+ prediction_logits: tf.Tensor | None = None
145
+ cross_relationship_score: tf.Tensor | None = None
146
+ question_answering_score: tf.Tensor | None = None
147
+ language_hidden_states: Tuple[tf.Tensor] | None = None
148
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
149
+ language_attentions: Tuple[tf.Tensor] | None = None
150
+ vision_attentions: Tuple[tf.Tensor] | None = None
151
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
152
+
153
+
154
+ class TFLxmertVisualFeatureEncoder(keras.layers.Layer):
155
+ def __init__(self, config, **kwargs):
156
+ super().__init__(**kwargs)
157
+
158
+ # Object feature encoding
159
+ self.visn_fc = keras.layers.Dense(
160
+ config.hidden_size,
161
+ kernel_initializer=get_initializer(config.initializer_range),
162
+ name="visn_fc",
163
+ )
164
+ self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm")
165
+
166
+ # Box position encoding
167
+ self.box_fc = keras.layers.Dense(
168
+ config.hidden_size,
169
+ kernel_initializer=get_initializer(config.initializer_range),
170
+ name="box_fc",
171
+ )
172
+ self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm")
173
+
174
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
175
+ self.feat_dim = config.visual_feat_dim
176
+ self.pos_dim = config.visual_pos_dim
177
+ self.config = config
178
+
179
+ def call(self, visn_input, training=False):
180
+ feats, boxes = visn_input
181
+
182
+ x = self.visn_fc(feats)
183
+ x = self.visn_layer_norm(x)
184
+ y = self.box_fc(boxes)
185
+ y = self.box_layer_norm(y)
186
+ output = (x + y) / 2
187
+
188
+ output = self.dropout(output, training=training)
189
+ return output
190
+
191
+ def build(self, input_shape=None):
192
+ if self.built:
193
+ return
194
+ self.built = True
195
+ if getattr(self, "visn_fc", None) is not None:
196
+ with tf.name_scope(self.visn_fc.name):
197
+ self.visn_fc.build([None, None, self.feat_dim])
198
+ if getattr(self, "visn_layer_norm", None) is not None:
199
+ with tf.name_scope(self.visn_layer_norm.name):
200
+ self.visn_layer_norm.build([None, None, self.config.hidden_size])
201
+ if getattr(self, "box_fc", None) is not None:
202
+ with tf.name_scope(self.box_fc.name):
203
+ self.box_fc.build([None, None, self.pos_dim])
204
+ if getattr(self, "box_layer_norm", None) is not None:
205
+ with tf.name_scope(self.box_layer_norm.name):
206
+ self.box_layer_norm.build([None, None, self.config.hidden_size])
207
+
208
+
209
+ class TFLxmertEmbeddings(keras.layers.Layer):
210
+ """Construct the embeddings from word, position and token_type embeddings."""
211
+
212
+ def __init__(self, config, **kwargs):
213
+ super().__init__(**kwargs)
214
+
215
+ self.config = config
216
+ self.hidden_size = config.hidden_size
217
+ self.max_position_embeddings = config.max_position_embeddings
218
+ self.initializer_range = config.initializer_range
219
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
220
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
221
+
222
+ def build(self, input_shape=None):
223
+ with tf.name_scope("word_embeddings"):
224
+ self.weight = self.add_weight(
225
+ name="weight",
226
+ shape=[self.config.vocab_size, self.hidden_size],
227
+ initializer=get_initializer(initializer_range=self.initializer_range),
228
+ )
229
+
230
+ with tf.name_scope("token_type_embeddings"):
231
+ self.token_type_embeddings = self.add_weight(
232
+ name="embeddings",
233
+ shape=[self.config.type_vocab_size, self.hidden_size],
234
+ initializer=get_initializer(initializer_range=self.initializer_range),
235
+ )
236
+
237
+ with tf.name_scope("position_embeddings"):
238
+ self.position_embeddings = self.add_weight(
239
+ name="embeddings",
240
+ shape=[self.max_position_embeddings, self.hidden_size],
241
+ initializer=get_initializer(initializer_range=self.initializer_range),
242
+ )
243
+
244
+ if self.built:
245
+ return
246
+ self.built = True
247
+ if getattr(self, "LayerNorm", None) is not None:
248
+ with tf.name_scope(self.LayerNorm.name):
249
+ self.LayerNorm.build([None, None, self.config.hidden_size])
250
+
251
+ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
252
+ """
253
+ Applies embedding based on inputs tensor.
254
+
255
+ Returns:
256
+ final_embeddings (`tf.Tensor`): output embedding tensor.
257
+ """
258
+ assert not (input_ids is None and inputs_embeds is None)
259
+
260
+ if input_ids is not None:
261
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
262
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
263
+
264
+ input_shape = shape_list(inputs_embeds)[:-1]
265
+
266
+ if token_type_ids is None:
267
+ token_type_ids = tf.fill(dims=input_shape, value=0)
268
+
269
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
270
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
271
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
272
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
273
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
274
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
275
+
276
+ return final_embeddings
277
+
278
+
279
+ class TFLxmertAttention(keras.layers.Layer):
280
+ def __init__(self, config, **kwargs):
281
+ super().__init__(**kwargs)
282
+ if config.hidden_size % config.num_attention_heads != 0:
283
+ raise ValueError(
284
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
285
+ f"heads ({config.num_attention_heads}"
286
+ )
287
+
288
+ self.num_attention_heads = config.num_attention_heads
289
+ assert config.hidden_size % config.num_attention_heads == 0
290
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
291
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
292
+
293
+ self.query = keras.layers.Dense(
294
+ self.all_head_size,
295
+ kernel_initializer=get_initializer(config.initializer_range),
296
+ name="query",
297
+ )
298
+ self.key = keras.layers.Dense(
299
+ self.all_head_size,
300
+ kernel_initializer=get_initializer(config.initializer_range),
301
+ name="key",
302
+ )
303
+ self.value = keras.layers.Dense(
304
+ self.all_head_size,
305
+ kernel_initializer=get_initializer(config.initializer_range),
306
+ name="value",
307
+ )
308
+
309
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
310
+ self.ctx_dim = config.hidden_size
311
+ self.config = config
312
+
313
+ def transpose_for_scores(self, x, batch_size):
314
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
315
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
316
+ return tf.transpose(x, perm=[0, 2, 1, 3])
317
+
318
+ def call(self, hidden_states, context, attention_mask, output_attentions, training=False):
319
+ batch_size = shape_list(hidden_states)[0]
320
+ mixed_query_layer = self.query(hidden_states)
321
+ mixed_key_layer = self.key(context)
322
+ mixed_value_layer = self.value(context)
323
+
324
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
325
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
326
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
327
+
328
+ # Take the dot product between "query" and "key" to get the raw attention scores.
329
+ attention_scores = tf.matmul(
330
+ query_layer, key_layer, transpose_b=True
331
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
332
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
333
+ attention_scores = attention_scores / tf.math.sqrt(dk)
334
+
335
+ if attention_mask is not None:
336
+ # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function)
337
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
338
+ attention_scores = attention_scores + attention_mask
339
+
340
+ # Normalize the attention scores to probabilities.
341
+ attention_probs = stable_softmax(attention_scores, axis=-1)
342
+
343
+ # This is actually dropping out entire tokens to attend to, which might
344
+ # seem a bit unusual, but is taken from the original Transformer paper.
345
+ attention_probs = self.dropout(attention_probs, training=training)
346
+ context_layer = tf.matmul(attention_probs, value_layer)
347
+
348
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
349
+ context_layer = tf.reshape(
350
+ context_layer, (batch_size, -1, self.all_head_size)
351
+ ) # (batch_size, seq_len_q, all_head_size)
352
+
353
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
354
+ return outputs
355
+
356
+ def build(self, input_shape=None):
357
+ if self.built:
358
+ return
359
+ self.built = True
360
+ if getattr(self, "query", None) is not None:
361
+ with tf.name_scope(self.query.name):
362
+ self.query.build([None, None, self.config.hidden_size])
363
+ if getattr(self, "key", None) is not None:
364
+ with tf.name_scope(self.key.name):
365
+ self.key.build([None, None, self.ctx_dim])
366
+ if getattr(self, "value", None) is not None:
367
+ with tf.name_scope(self.value.name):
368
+ self.value.build([None, None, self.ctx_dim])
369
+
370
+
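The `transpose_for_scores` helper and the scaled dot-product in `TFLxmertAttention` above follow the standard multi-head attention shape flow. Below is a minimal standalone sketch of that flow; the dimensions are toy values chosen only for illustration and are not taken from this file:

```python
import tensorflow as tf

batch, seq_q, seq_k, num_heads, head_size = 2, 5, 7, 4, 16
hidden = num_heads * head_size

q = tf.random.normal((batch, seq_q, hidden))
k = tf.random.normal((batch, seq_k, hidden))
v = tf.random.normal((batch, seq_k, hidden))

def split_heads(x, seq_len):
    # [batch, seq, hidden] -> [batch, num_heads, seq, head_size]
    x = tf.reshape(x, (batch, seq_len, num_heads, head_size))
    return tf.transpose(x, perm=[0, 2, 1, 3])

scores = tf.matmul(split_heads(q, seq_q), split_heads(k, seq_k), transpose_b=True)
scores /= tf.math.sqrt(tf.cast(head_size, scores.dtype))           # scale by sqrt(d_k)
probs = tf.nn.softmax(scores, axis=-1)                             # [batch, heads, seq_q, seq_k]
context = tf.matmul(probs, split_heads(v, seq_k))                  # [batch, heads, seq_q, head_size]
context = tf.reshape(tf.transpose(context, perm=[0, 2, 1, 3]), (batch, seq_q, hidden))
print(context.shape)  # (2, 5, 64)
```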
371
+ class TFLxmertIntermediate(keras.layers.Layer):
372
+ def __init__(self, config, **kwargs):
373
+ super().__init__(**kwargs)
374
+ self.dense = keras.layers.Dense(
375
+ config.intermediate_size,
376
+ kernel_initializer=get_initializer(config.initializer_range),
377
+ name="dense",
378
+ )
379
+ if isinstance(config.hidden_act, str):
380
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
381
+ else:
382
+ self.intermediate_act_fn = config.hidden_act
383
+ self.config = config
384
+
385
+ def call(self, hidden_states):
386
+ hidden_states = self.dense(hidden_states)
387
+ hidden_states = self.intermediate_act_fn(hidden_states)
388
+ return hidden_states
389
+
390
+ def build(self, input_shape=None):
391
+ if self.built:
392
+ return
393
+ self.built = True
394
+ if getattr(self, "dense", None) is not None:
395
+ with tf.name_scope(self.dense.name):
396
+ self.dense.build([None, None, self.config.hidden_size])
397
+
398
+
399
+ class TFLxmertOutput(keras.layers.Layer):
400
+ def __init__(self, config, **kwargs):
401
+ super().__init__(**kwargs)
402
+ self.dense = keras.layers.Dense(
403
+ config.hidden_size,
404
+ kernel_initializer=get_initializer(config.initializer_range),
405
+ name="dense",
406
+ )
407
+
408
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
409
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
410
+ self.config = config
411
+
412
+ def call(self, hidden_states, input_tensor, training=False):
413
+ hidden_states = self.dense(hidden_states)
414
+ hidden_states = self.dropout(hidden_states, training)
415
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
416
+ return hidden_states
417
+
418
+ def build(self, input_shape=None):
419
+ if self.built:
420
+ return
421
+ self.built = True
422
+ if getattr(self, "dense", None) is not None:
423
+ with tf.name_scope(self.dense.name):
424
+ self.dense.build([None, None, self.config.intermediate_size])
425
+ if getattr(self, "LayerNorm", None) is not None:
426
+ with tf.name_scope(self.LayerNorm.name):
427
+ self.LayerNorm.build([None, None, self.config.hidden_size])
428
+
429
+
430
+ class TFLxmertAttentionOutput(keras.layers.Layer):
431
+ def __init__(self, config, **kwargs):
432
+ super().__init__(**kwargs)
433
+ self.dense = keras.layers.Dense(
434
+ config.hidden_size,
435
+ kernel_initializer=get_initializer(config.initializer_range),
436
+ name="dense",
437
+ )
438
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
439
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
440
+ self.config = config
441
+
442
+ def call(self, hidden_states, input_tensor, training=False):
443
+ hidden_states = self.dense(hidden_states)
444
+ hidden_states = self.dropout(hidden_states, training=training)
445
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
446
+ return hidden_states
447
+
448
+ def build(self, input_shape=None):
449
+ if self.built:
450
+ return
451
+ self.built = True
452
+ if getattr(self, "dense", None) is not None:
453
+ with tf.name_scope(self.dense.name):
454
+ self.dense.build([None, None, self.config.hidden_size])
455
+ if getattr(self, "LayerNorm", None) is not None:
456
+ with tf.name_scope(self.LayerNorm.name):
457
+ self.LayerNorm.build([None, None, self.config.hidden_size])
458
+
459
+
460
+ class TFLxmertSelfAttentionLayer(keras.layers.Layer):
461
+ def __init__(self, config, **kwargs):
462
+ super().__init__(**kwargs)
463
+ self.self = TFLxmertAttention(config, name="self")
464
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
465
+
466
+ def call(self, input_tensor, attention_mask, output_attentions, training=False):
467
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
468
+ self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions)
469
+ if output_attentions:
470
+ attention_probs = self_output[1]
471
+ attention_output = self.attention_output(self_output[0], input_tensor)
472
+ return (attention_output, attention_probs) if output_attentions else (attention_output,)
473
+
474
+ def build(self, input_shape=None):
475
+ if self.built:
476
+ return
477
+ self.built = True
478
+ if getattr(self, "self", None) is not None:
479
+ with tf.name_scope(self.self.name):
480
+ self.self.build(None)
481
+ if getattr(self, "attention_output", None) is not None:
482
+ with tf.name_scope(self.attention_output.name):
483
+ self.attention_output.build(None)
484
+
485
+
486
+ class TFLxmertCrossAttentionLayer(keras.layers.Layer):
487
+ def __init__(self, config, **kwargs):
488
+ super().__init__(**kwargs)
489
+ self.att = TFLxmertAttention(config, name="att")
490
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
491
+
492
+ def call(
493
+ self,
494
+ input_tensor,
495
+ ctx_tensor,
496
+ ctx_att_mask,
497
+ output_attentions=False,
498
+ training=False,
499
+ ):
500
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)
501
+ if output_attentions:
502
+ attention_probs = output[1]
503
+ attention_output = self.attention_output(output[0], input_tensor, training=training)
504
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
505
+ return outputs
506
+
507
+ def build(self, input_shape=None):
508
+ if self.built:
509
+ return
510
+ self.built = True
511
+ if getattr(self, "att", None) is not None:
512
+ with tf.name_scope(self.att.name):
513
+ self.att.build(None)
514
+ if getattr(self, "attention_output", None) is not None:
515
+ with tf.name_scope(self.attention_output.name):
516
+ self.attention_output.build(None)
517
+
518
+
519
+ class TFLxmertLayer(keras.layers.Layer):
520
+ def __init__(self, config, **kwargs):
521
+ super().__init__(**kwargs)
522
+ self.attention = TFLxmertSelfAttentionLayer(config, name="attention")
523
+ self.intermediate = TFLxmertIntermediate(config, name="intermediate")
524
+ self.transformer_output = TFLxmertOutput(config, name="output")
525
+
526
+ def call(self, hidden_states, attention_mask, output_attentions, training=False):
527
+ attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)
528
+ attention_output = attention_outputs[0]
529
+ intermediate_output = self.intermediate(attention_output)
530
+ layer_output = self.transformer_output(intermediate_output, attention_output, training=training)
531
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
532
+ return outputs
533
+
534
+ def build(self, input_shape=None):
535
+ if self.built:
536
+ return
537
+ self.built = True
538
+ if getattr(self, "attention", None) is not None:
539
+ with tf.name_scope(self.attention.name):
540
+ self.attention.build(None)
541
+ if getattr(self, "intermediate", None) is not None:
542
+ with tf.name_scope(self.intermediate.name):
543
+ self.intermediate.build(None)
544
+ if getattr(self, "transformer_output", None) is not None:
545
+ with tf.name_scope(self.transformer_output.name):
546
+ self.transformer_output.build(None)
547
+
548
+
549
+ class TFLxmertXLayer(keras.layers.Layer):
550
+ def __init__(self, config, **kwargs):
551
+ super().__init__(**kwargs)
552
+ self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention")
553
+
554
+ # Self-attention Layers
555
+ self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att")
556
+ self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att")
557
+
558
+ # Intermediate and Output Layers (FFNs)
559
+ self.lang_inter = TFLxmertIntermediate(config, name="lang_inter")
560
+ self.lang_output = TFLxmertOutput(config, name="lang_output")
561
+ self.visn_inter = TFLxmertIntermediate(config, name="visn_inter")
562
+ self.visn_output = TFLxmertOutput(config, name="visn_output")
563
+
564
+ def cross_att(
565
+ self,
566
+ lang_input,
567
+ lang_attention_mask,
568
+ visn_input,
569
+ visn_attention_mask,
570
+ output_attentions,
571
+ training=False,
572
+ ):
573
+ # Cross Attention
574
+
575
+ # Keras model saving and loading *does not work* when the same input tensor is passed to two different layers, so copies are made with tf.identity.
576
+ lang_attention_lang_input = tf.identity(lang_input)
577
+ visn_attention_lang_input = tf.identity(lang_input)
578
+ lang_attention_visn_input = tf.identity(visn_input)
579
+ visn_attention_visn_input = tf.identity(visn_input)
580
+
581
+ lang_att_output = self.visual_attention(
582
+ lang_attention_lang_input,
583
+ lang_attention_visn_input,
584
+ visn_attention_mask,
585
+ output_attentions=output_attentions,
586
+ training=training,
587
+ )
588
+ visn_att_output = self.visual_attention(
589
+ visn_attention_visn_input,
590
+ visn_attention_lang_input,
591
+ lang_attention_mask,
592
+ output_attentions=output_attentions,
593
+ training=training,
594
+ )
595
+ return lang_att_output, visn_att_output
596
+
597
+ def self_att(
598
+ self,
599
+ lang_input,
600
+ lang_attention_mask,
601
+ visn_input,
602
+ visn_attention_mask,
603
+ training=False,
604
+ ):
605
+ # Self Attention
606
+ output_attentions = False
607
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)
608
+ visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)
609
+ return lang_att_output[0], visn_att_output[0]
610
+
611
+ def output_fc(self, lang_input, visn_input, training=False):
612
+ # FC layers
613
+ lang_inter_output = self.lang_inter(lang_input)
614
+ visn_inter_output = self.visn_inter(visn_input)
615
+
616
+ # Layer output
617
+ lang_output = self.lang_output(lang_inter_output, lang_input, training)
618
+ visn_output = self.visn_output(visn_inter_output, visn_input, training)
619
+ return lang_output, visn_output
620
+
621
+ def call(
622
+ self,
623
+ lang_feats,
624
+ lang_attention_mask,
625
+ visn_feats,
626
+ visn_attention_mask,
627
+ output_attentions,
628
+ training=False,
629
+ ):
630
+ lang_att_output = lang_feats
631
+ visn_att_output = visn_feats
632
+
633
+ lang_att_output, visn_att_output = self.cross_att(
634
+ lang_att_output,
635
+ lang_attention_mask,
636
+ visn_att_output,
637
+ visn_attention_mask,
638
+ output_attentions,
639
+ training=training,
640
+ )
641
+ attention_probs = lang_att_output[1:]
642
+ lang_att_output, visn_att_output = self.self_att(
643
+ lang_att_output[0],
644
+ lang_attention_mask,
645
+ visn_att_output[0],
646
+ visn_attention_mask,
647
+ training=training,
648
+ )
649
+ lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)
650
+
651
+ return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)
652
+
653
+ def build(self, input_shape=None):
654
+ if self.built:
655
+ return
656
+ self.built = True
657
+ if getattr(self, "visual_attention", None) is not None:
658
+ with tf.name_scope(self.visual_attention.name):
659
+ self.visual_attention.build(None)
660
+ if getattr(self, "lang_self_att", None) is not None:
661
+ with tf.name_scope(self.lang_self_att.name):
662
+ self.lang_self_att.build(None)
663
+ if getattr(self, "visn_self_att", None) is not None:
664
+ with tf.name_scope(self.visn_self_att.name):
665
+ self.visn_self_att.build(None)
666
+ if getattr(self, "lang_inter", None) is not None:
667
+ with tf.name_scope(self.lang_inter.name):
668
+ self.lang_inter.build(None)
669
+ if getattr(self, "lang_output", None) is not None:
670
+ with tf.name_scope(self.lang_output.name):
671
+ self.lang_output.build(None)
672
+ if getattr(self, "visn_inter", None) is not None:
673
+ with tf.name_scope(self.visn_inter.name):
674
+ self.visn_inter.build(None)
675
+ if getattr(self, "visn_output", None) is not None:
676
+ with tf.name_scope(self.visn_output.name):
677
+ self.visn_output.build(None)
678
+
679
+
680
+ class TFLxmertEncoder(keras.layers.Layer):
681
+ def __init__(self, config, **kwargs):
682
+ super().__init__(**kwargs)
683
+
684
+ self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc")
685
+
686
+ # Number of layers
687
+ self.num_l_layers = config.l_layers
688
+ self.num_x_layers = config.x_layers
689
+ self.num_r_layers = config.r_layers
690
+
691
+ # Layers
692
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
693
+ self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)]
694
+ self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)]
695
+ self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)]
696
+ self.config = config
697
+
698
+ def call(
699
+ self,
700
+ lang_feats=None,
701
+ lang_attention_mask=None,
702
+ visual_feats=None,
703
+ visual_pos=None,
704
+ visual_attention_mask=None,
705
+ output_attentions=None,
706
+ training=False,
707
+ ):
708
+ vision_hidden_states = ()
709
+ language_hidden_states = ()
710
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
711
+ language_attentions = () if output_attentions or self.config.output_attentions else None
712
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
713
+
714
+ visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)
715
+
716
+ # Run language layers
717
+ for layer_module in self.layer:
718
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)
719
+ lang_feats = l_outputs[0]
720
+ language_hidden_states = language_hidden_states + (lang_feats,)
721
+ if language_attentions is not None:
722
+ language_attentions = language_attentions + (l_outputs[1],)
723
+
724
+ # Run relational layers
725
+ for layer_module in self.r_layers:
726
+ v_outputs = layer_module(
727
+ visual_feats,
728
+ visual_attention_mask,
729
+ output_attentions,
730
+ training=training,
731
+ )
732
+ visual_feats = v_outputs[0]
733
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
734
+ if vision_attentions is not None:
735
+ vision_attentions = vision_attentions + (v_outputs[1],)
736
+
737
+ # Run cross-modality layers
738
+ for layer_module in self.x_layers:
739
+ x_outputs = layer_module(
740
+ lang_feats,
741
+ lang_attention_mask,
742
+ visual_feats,
743
+ visual_attention_mask,
744
+ output_attentions,
745
+ training=training,
746
+ )
747
+ lang_feats, visual_feats = x_outputs[:2]
748
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
749
+ language_hidden_states = language_hidden_states + (lang_feats,)
750
+ if cross_encoder_attentions is not None:
751
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
752
+
753
+ visual_encoder_outputs = (
754
+ vision_hidden_states,
755
+ vision_attentions if output_attentions else None,
756
+ )
757
+ lang_encoder_outputs = (
758
+ language_hidden_states,
759
+ language_attentions if output_attentions else None,
760
+ )
761
+
762
+ return (
763
+ visual_encoder_outputs,
764
+ lang_encoder_outputs,
765
+ cross_encoder_attentions if output_attentions else None,
766
+ )
767
+
768
+ def build(self, input_shape=None):
769
+ if self.built:
770
+ return
771
+ self.built = True
772
+ if getattr(self, "visn_fc", None) is not None:
773
+ with tf.name_scope(self.visn_fc.name):
774
+ self.visn_fc.build(None)
775
+ if getattr(self, "layer", None) is not None:
776
+ for layer in self.layer:
777
+ with tf.name_scope(layer.name):
778
+ layer.build(None)
779
+ if getattr(self, "x_layers", None) is not None:
780
+ for layer in self.x_layers:
781
+ with tf.name_scope(layer.name):
782
+ layer.build(None)
783
+ if getattr(self, "r_layers", None) is not None:
784
+ for layer in self.r_layers:
785
+ with tf.name_scope(layer.name):
786
+ layer.build(None)
787
+
788
+
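`TFLxmertEncoder` above stacks three groups of layers whose depths come straight from the configuration: `l_layers` language-only blocks, `r_layers` vision-only ("relational") blocks, and `x_layers` cross-modality blocks. A minimal sketch of inspecting those depths follows; the printed values assume the library's default `LxmertConfig`, which is not shown in this file:

```python
from transformers import LxmertConfig

config = LxmertConfig()
# Language-only, cross-modality, and vision-only ("relational") layer counts.
print(config.l_layers, config.x_layers, config.r_layers)  # expected 9 5 5 with the defaults
```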
789
+ @keras_serializable
790
+ class TFLxmertMainLayer(keras.layers.Layer):
791
+ config_class = LxmertConfig
792
+
793
+ def __init__(self, config, **kwargs):
794
+ super().__init__(**kwargs)
795
+
796
+ self.config = config
797
+ self.num_l_layers = config.l_layers
798
+ self.num_x_layers = config.x_layers
799
+ self.num_r_layers = config.r_layers
800
+ self.initializer_range = config.initializer_range
801
+ self.output_attentions = config.output_attentions
802
+ self.output_hidden_states = config.output_hidden_states
803
+ self.return_dict = config.use_return_dict
804
+ self.embeddings = TFLxmertEmbeddings(config, name="embeddings")
805
+ self.encoder = TFLxmertEncoder(config, name="encoder")
806
+ self.pooler = TFLxmertPooler(config, name="pooler")
807
+ self.config = config
808
+
809
+ def get_input_embeddings(self):
810
+ return self.embeddings
811
+
812
+ def set_input_embeddings(self, value):
813
+ self.embeddings.weight = value
814
+ self.embeddings.vocab_size = shape_list(value)[0]
815
+
816
+ def _prune_heads(self, heads_to_prune):
817
+ raise NotImplementedError
818
+
819
+ @unpack_inputs
820
+ def call(
821
+ self,
822
+ input_ids=None,
823
+ visual_feats=None,
824
+ visual_pos=None,
825
+ attention_mask=None,
826
+ visual_attention_mask=None,
827
+ token_type_ids=None,
828
+ inputs_embeds=None,
829
+ output_attentions=None,
830
+ output_hidden_states=None,
831
+ return_dict=None,
832
+ training=False,
833
+ ):
834
+ if input_ids is not None and inputs_embeds is not None:
835
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
836
+ elif input_ids is not None:
837
+ input_shape = shape_list(input_ids)
838
+ elif inputs_embeds is not None:
839
+ input_shape = shape_list(inputs_embeds)[:-1]
840
+ else:
841
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
842
+ if visual_pos is None or visual_feats is None:
843
+ raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.")
844
+
845
+ if attention_mask is None:
846
+ attention_mask = tf.fill(input_shape, 1)
847
+
848
+ if token_type_ids is None:
849
+ token_type_ids = tf.fill(input_shape, 0)
850
+
851
+ # Positional Word Embeddings
852
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)
853
+
854
+ # We create a 3D attention mask from a 2D tensor mask.
855
+ # Sizes are [batch_size, 1, 1, to_seq_length]
856
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
857
+ # this attention mask is more simple than the triangular masking of causal attention
858
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
859
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
860
+
861
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
862
+ # masked positions, this operation will create a tensor which is 0.0 for
863
+ # positions we want to attend and -10000.0 for masked positions.
864
+ # Since we are adding it to the raw scores before the softmax, this is
865
+ # effectively the same as removing these entirely.
866
+
867
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
868
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
869
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
870
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
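The comments above describe how the 0/1 padding mask is turned into an additive bias that is added to the raw attention scores before the softmax. A minimal standalone sketch of that trick, with an assumed 4-token sequence:

```python
import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 0]], dtype=tf.float32)  # 1 = attend, 0 = padding
extended = tf.reshape(attention_mask, (1, 1, 1, 4))             # broadcastable over heads and query positions
additive = (1.0 - extended) * -10000.0                          # 0.0 for real tokens, -10000.0 for padding
# Adding `additive` to the raw attention scores drives padded positions to
# effectively zero probability after the softmax.
print(additive.numpy())  # [[[[     0.      0.      0. -10000.]]]]
```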
871
+
872
+ if visual_attention_mask is not None:
873
+ # Expand the 2D visual attention mask to [batch_size, 1, 1, num_visual_features] so it broadcasts over heads and query positions.
874
+ extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)
875
+
876
+ extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)
877
+ extended_visual_attention_mask = tf.multiply(
878
+ tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst
879
+ )
880
+ else:
881
+ extended_visual_attention_mask = None
882
+
883
+ # Run Lxmert encoder
884
+ encoder_outputs = self.encoder(
885
+ embedding_output,
886
+ extended_attention_mask,
887
+ visual_feats,
888
+ visual_pos,
889
+ extended_visual_attention_mask,
890
+ output_attentions,
891
+ training,
892
+ )
893
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
894
+ vision_hidden_states = visual_encoder_outputs[0]
895
+ language_hidden_states = lang_encoder_outputs[0]
896
+
897
+ all_attentions = ()
898
+ if output_attentions:
899
+ language_attentions = lang_encoder_outputs[1]
900
+ vision_attentions = visual_encoder_outputs[1]
901
+ cross_encoder_attentions = encoder_outputs[2]
902
+ all_attentions = (
903
+ language_attentions,
904
+ vision_attentions,
905
+ cross_encoder_attentions,
906
+ )
907
+
908
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
909
+
910
+ visual_output = vision_hidden_states[-1]
911
+ lang_output = language_hidden_states[-1]
912
+ pooled_output = self.pooler(lang_output)
913
+
914
+ if not return_dict:
915
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
916
+
917
+ return TFLxmertModelOutput(
918
+ pooled_output=pooled_output,
919
+ language_output=lang_output,
920
+ vision_output=visual_output,
921
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
922
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
923
+ language_attentions=language_attentions if output_attentions else None,
924
+ vision_attentions=vision_attentions if output_attentions else None,
925
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
926
+ )
927
+
928
+ def build(self, input_shape=None):
929
+ if self.built:
930
+ return
931
+ self.built = True
932
+ if getattr(self, "embeddings", None) is not None:
933
+ with tf.name_scope(self.embeddings.name):
934
+ self.embeddings.build(None)
935
+ if getattr(self, "encoder", None) is not None:
936
+ with tf.name_scope(self.encoder.name):
937
+ self.encoder.build(None)
938
+ if getattr(self, "pooler", None) is not None:
939
+ with tf.name_scope(self.pooler.name):
940
+ self.pooler.build(None)
941
+
942
+
943
+ class TFLxmertPreTrainedModel(TFPreTrainedModel):
944
+ """
945
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
946
+ models.
947
+ """
948
+
949
+ config_class = LxmertConfig
950
+ base_model_prefix = "lxmert"
951
+
952
+ @property
953
+ def dummy_inputs(self):
954
+ """
955
+ Dummy inputs to build the network.
956
+
957
+ Returns:
958
+ tf.Tensor with dummy inputs
959
+ """
960
+ batch_size = 2
961
+ num_visual_features = 10
962
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
963
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
964
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
965
+
966
+ return {
967
+ "input_ids": input_ids,
968
+ "visual_feats": visual_feats,
969
+ "visual_pos": visual_pos,
970
+ }
971
+
972
+ @property
973
+ def input_signature(self):
974
+ return {
975
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
976
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
977
+ "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"),
978
+ "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"),
979
+ "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
980
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
981
+ }
982
+
983
+
984
+ LXMERT_START_DOCSTRING = r"""
985
+
986
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
987
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
988
+ model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MS COCO captions, and Visual
989
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, cross-entropy loss
990
+ for question answering, attribute prediction, and object tag prediction.
991
+
992
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
993
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
994
+ behavior.
995
+
996
+ <Tip>
997
+
998
+ TensorFlow models and layers in `transformers` accept two formats as input:
999
+
1000
+ - having all inputs as keyword arguments (like PyTorch models), or
1001
+ - having all inputs as a list, tuple or dict in the first positional argument.
1002
+
1003
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1004
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1005
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1006
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1007
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1008
+ positional argument:
1009
+
1010
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1011
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1012
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1013
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1014
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1015
+
1016
+ Note that when creating models and layers with
1017
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1018
+ about any of this, as you can just pass inputs like you would to any other Python function!
1019
+
1020
+ </Tip>
1021
+
1022
+ Parameters:
1023
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
1024
+ Initializing with a config file does not load the weights associated with the model, only the
1025
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1026
+ """
1027
+
1028
+ LXMERT_INPUTS_DOCSTRING = r"""
1029
+ Args:
1030
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
1031
+ Indices of input sequence tokens in the vocabulary.
1032
+
1033
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1034
+ [`PreTrainedTokenizer.encode`] for details.
1035
+
1036
+ [What are input IDs?](../glossary#input-ids)
1037
+ visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
1038
+ This input represents visual features: ROI-pooled object features from bounding boxes, extracted with a
1039
+ faster-RCNN model.
1040
+
1041
+ These are currently not provided by the transformers library.
1042
+ visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`):
1043
+ This input represents spatial features corresponding, by index, to the visual features. The
1044
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1045
+ 1.
1046
+
1047
+ These are currently not provided by the transformers library.
1048
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1049
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1050
+
1051
+ - 1 for tokens that are **not masked**,
1052
+ - 0 for tokens that are **masked**.
1053
+
1054
+ [What are attention masks?](../glossary#attention-mask)
1055
+ visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1056
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1057
+
1058
+ - 1 for tokens that are **not masked**,
1059
+ - 0 for tokens that are **masked**.
1060
+
1061
+ [What are attention masks?](../glossary#attention-mask)
1062
+ token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1063
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1064
+ 1]`:
1065
+
1066
+ - 0 corresponds to a *sentence A* token,
1067
+ - 1 corresponds to a *sentence B* token.
1068
+
1069
+ [What are token type IDs?](../glossary#token-type-ids)
1070
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1071
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1072
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1073
+ model's internal embedding lookup matrix.
1074
+ output_attentions (`bool`, *optional*):
1075
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1076
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1077
+ config will be used instead.
1078
+ output_hidden_states (`bool`, *optional*):
1079
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1080
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1081
+ used instead.
1082
+ return_dict (`bool`, *optional*):
1083
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1084
+ eager mode, in graph mode the value will always be set to True.
1085
+ training (`bool`, *optional*, defaults to `False`):
1086
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1087
+ behaviors between training and evaluation).
1088
+ """
1089
+
1090
+
1091
+ @add_start_docstrings(
1092
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
1093
+ LXMERT_START_DOCSTRING,
1094
+ )
1095
+ class TFLxmertModel(TFLxmertPreTrainedModel):
1096
+ def __init__(self, config, *inputs, **kwargs):
1097
+ super().__init__(config, *inputs, **kwargs)
1098
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1099
+
1100
+ @unpack_inputs
1101
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1102
+ @add_code_sample_docstrings(
1103
+ checkpoint=_CHECKPOINT_FOR_DOC,
1104
+ output_type=TFLxmertModelOutput,
1105
+ config_class=_CONFIG_FOR_DOC,
1106
+ )
1107
+ def call(
1108
+ self,
1109
+ input_ids: TFModelInputType | None = None,
1110
+ visual_feats: tf.Tensor | None = None,
1111
+ visual_pos: tf.Tensor | None = None,
1112
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1113
+ visual_attention_mask: np.ndarray | tf.Tensor | None = None,
1114
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1115
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1116
+ output_attentions: Optional[bool] = None,
1117
+ output_hidden_states: Optional[bool] = None,
1118
+ return_dict: Optional[bool] = None,
1119
+ training: bool = False,
1120
+ ) -> Union[Tuple, TFLxmertModelOutput]:
1121
+ outputs = self.lxmert(
1122
+ input_ids,
1123
+ visual_feats,
1124
+ visual_pos,
1125
+ attention_mask,
1126
+ visual_attention_mask,
1127
+ token_type_ids,
1128
+ inputs_embeds,
1129
+ output_attentions,
1130
+ output_hidden_states,
1131
+ return_dict,
1132
+ training,
1133
+ )
1134
+
1135
+ return outputs
1136
+
1137
+ def build(self, input_shape=None):
1138
+ if self.built:
1139
+ return
1140
+ self.built = True
1141
+ if getattr(self, "lxmert", None) is not None:
1142
+ with tf.name_scope(self.lxmert.name):
1143
+ self.lxmert.build(None)
1144
+
1145
+
1146
+ class TFLxmertPooler(keras.layers.Layer):
1147
+ def __init__(self, config, **kwargs):
1148
+ super().__init__(**kwargs)
1149
+ self.dense = keras.layers.Dense(
1150
+ config.hidden_size,
1151
+ kernel_initializer=get_initializer(config.initializer_range),
1152
+ activation="tanh",
1153
+ name="dense",
1154
+ )
1155
+ self.config = config
1156
+
1157
+ def call(self, hidden_states):
1158
+ # We "pool" the model by simply taking the hidden state corresponding
1159
+ # to the first token.
1160
+ first_token_tensor = hidden_states[:, 0]
1161
+ pooled_output = self.dense(first_token_tensor)
1162
+ return pooled_output
1163
+
1164
+ def build(self, input_shape=None):
1165
+ if self.built:
1166
+ return
1167
+ self.built = True
1168
+ if getattr(self, "dense", None) is not None:
1169
+ with tf.name_scope(self.dense.name):
1170
+ self.dense.build([None, None, self.config.hidden_size])
1171
+
1172
+
1173
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
1174
+ class TFLxmertPredictionHeadTransform(keras.layers.Layer):
1175
+ def __init__(self, config: LxmertConfig, **kwargs):
1176
+ super().__init__(**kwargs)
1177
+
1178
+ self.dense = keras.layers.Dense(
1179
+ units=config.hidden_size,
1180
+ kernel_initializer=get_initializer(config.initializer_range),
1181
+ name="dense",
1182
+ )
1183
+
1184
+ if isinstance(config.hidden_act, str):
1185
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
1186
+ else:
1187
+ self.transform_act_fn = config.hidden_act
1188
+
1189
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
1190
+ self.config = config
1191
+
1192
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1193
+ hidden_states = self.dense(inputs=hidden_states)
1194
+ hidden_states = self.transform_act_fn(hidden_states)
1195
+ hidden_states = self.LayerNorm(inputs=hidden_states)
1196
+
1197
+ return hidden_states
1198
+
1199
+ def build(self, input_shape=None):
1200
+ if self.built:
1201
+ return
1202
+ self.built = True
1203
+ if getattr(self, "dense", None) is not None:
1204
+ with tf.name_scope(self.dense.name):
1205
+ self.dense.build([None, None, self.config.hidden_size])
1206
+ if getattr(self, "LayerNorm", None) is not None:
1207
+ with tf.name_scope(self.LayerNorm.name):
1208
+ self.LayerNorm.build([None, None, self.config.hidden_size])
1209
+
1210
+
1211
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
1212
+ class TFLxmertLMPredictionHead(keras.layers.Layer):
1213
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1214
+ super().__init__(**kwargs)
1215
+
1216
+ self.config = config
1217
+ self.hidden_size = config.hidden_size
1218
+
1219
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1220
+
1221
+ # The output weights are the same as the input embeddings, but there is
1222
+ # an output-only bias for each token.
1223
+ self.input_embeddings = input_embeddings
1224
+
1225
+ def build(self, input_shape=None):
1226
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1227
+
1228
+ if self.built:
1229
+ return
1230
+ self.built = True
1231
+ if getattr(self, "transform", None) is not None:
1232
+ with tf.name_scope(self.transform.name):
1233
+ self.transform.build(None)
1234
+
1235
+ def get_output_embeddings(self) -> keras.layers.Layer:
1236
+ return self.input_embeddings
1237
+
1238
+ def set_output_embeddings(self, value: tf.Variable):
1239
+ self.input_embeddings.weight = value
1240
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1241
+
1242
+ def get_bias(self) -> Dict[str, tf.Variable]:
1243
+ return {"bias": self.bias}
1244
+
1245
+ def set_bias(self, value: tf.Variable):
1246
+ self.bias = value["bias"]
1247
+ self.config.vocab_size = shape_list(value["bias"])[0]
1248
+
1249
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1250
+ hidden_states = self.transform(hidden_states=hidden_states)
1251
+ seq_length = shape_list(hidden_states)[1]
1252
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1253
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1254
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1255
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1256
+
1257
+ return hidden_states
1258
+
1259
+
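`TFLxmertLMPredictionHead` above ties its decoder to the input word embeddings and only adds a per-token output bias. A minimal standalone sketch of that weight-tying trick, with toy dimensions chosen for illustration:

```python
import tensorflow as tf

vocab_size, hidden_size, seq_len = 30522, 768, 4
embedding_matrix = tf.random.normal((vocab_size, hidden_size))  # shared with the input embeddings
bias = tf.zeros((vocab_size,))                                  # the only new parameter of the head
hidden_states = tf.random.normal((1, seq_len, hidden_size))

# Project hidden states back onto the vocabulary with the transposed embedding matrix.
logits = tf.matmul(hidden_states, embedding_matrix, transpose_b=True) + bias
print(logits.shape)  # (1, 4, 30522)
```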
1260
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
1261
+ class TFLxmertMLMHead(keras.layers.Layer):
1262
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1263
+ super().__init__(**kwargs)
1264
+
1265
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1266
+
1267
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1268
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1269
+
1270
+ return prediction_scores
1271
+
1272
+ def build(self, input_shape=None):
1273
+ if self.built:
1274
+ return
1275
+ self.built = True
1276
+ if getattr(self, "predictions", None) is not None:
1277
+ with tf.name_scope(self.predictions.name):
1278
+ self.predictions.build(None)
1279
+
1280
+
1281
+ class TFLxmertPreTrainingHeads(keras.layers.Layer):
1282
+ def __init__(self, config, input_embeddings, **kwargs):
1283
+ super().__init__(**kwargs)
1284
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1285
+
1286
+ self.seq_relationship = keras.layers.Dense(
1287
+ 2,
1288
+ kernel_initializer=get_initializer(config.initializer_range),
1289
+ name="seq_relationship",
1290
+ )
1291
+ self.config = config
1292
+
1293
+ def call(self, sequence_output, pooled_output):
1294
+ prediction_scores = self.predictions(sequence_output)
1295
+ seq_relationship_score = self.seq_relationship(pooled_output)
1296
+ return prediction_scores, seq_relationship_score
1297
+
1298
+ def build(self, input_shape=None):
1299
+ if self.built:
1300
+ return
1301
+ self.built = True
1302
+ if getattr(self, "predictions", None) is not None:
1303
+ with tf.name_scope(self.predictions.name):
1304
+ self.predictions.build(None)
1305
+ if getattr(self, "seq_relationship", None) is not None:
1306
+ with tf.name_scope(self.seq_relationship.name):
1307
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1308
+
1309
+
1310
+ class TFLxmertVisualAnswerHead(keras.layers.Layer):
1311
+ def __init__(self, config, num_labels, **kwargs):
1312
+ super().__init__(**kwargs)
1313
+ hid_dim = config.hidden_size
1314
+ self.dense = keras.layers.Dense(
1315
+ hid_dim * 2,
1316
+ kernel_initializer=get_initializer(config.initializer_range),
1317
+ name="logit_fc_._0",
1318
+ )
1319
+ self.activation = get_tf_activation("gelu")
1320
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
1321
+ self.dense_1 = keras.layers.Dense(
1322
+ num_labels,
1323
+ kernel_initializer=get_initializer(config.initializer_range),
1324
+ name="logit_fc_._3",
1325
+ )
1326
+ self.hid_dim = hid_dim
1327
+
1328
+ def call(self, hidden_states):
1329
+ hidden_states = self.dense(hidden_states)
1330
+ hidden_states = self.activation(hidden_states)
1331
+ hidden_states = self.layer_norm(hidden_states)
1332
+ hidden_states = self.dense_1(hidden_states)
1333
+
1334
+ return hidden_states
1335
+
1336
+ def build(self, input_shape=None):
1337
+ if self.built:
1338
+ return
1339
+ self.built = True
1340
+ if getattr(self, "dense", None) is not None:
1341
+ with tf.name_scope(self.dense.name):
1342
+ self.dense.build([None, None, self.hid_dim])
1343
+ if getattr(self, "layer_norm", None) is not None:
1344
+ with tf.name_scope(self.layer_norm.name):
1345
+ self.layer_norm.build([None, self.hid_dim * 2])
1346
+ if getattr(self, "dense_1", None) is not None:
1347
+ with tf.name_scope(self.dense_1.name):
1348
+ self.dense_1.build([None, None, self.hid_dim * 2])
1349
+
1350
+
1351
+ class TFLxmertVisualObjHead(keras.layers.Layer):
1352
+ def __init__(self, config, **kwargs):
1353
+ super().__init__(**kwargs)
1354
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1355
+
1356
+ # Decide the use of visual losses
1357
+ visual_losses = {}
1358
+ if config.visual_obj_loss:
1359
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
1360
+ if config.visual_attr_loss:
1361
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
1362
+ if config.visual_feat_loss:
1363
+ visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim}
1364
+ self.visual_losses = visual_losses
1365
+
1366
+ # The output weights are the same as the input embeddings, but there is
1367
+ # an output-only bias for each token.
1368
+ self.decoder_dict = {
1369
+ key: keras.layers.Dense(
1370
+ self.visual_losses[key]["num"],
1371
+ kernel_initializer=get_initializer(config.initializer_range),
1372
+ name=f"decoder_dict.{key}",
1373
+ )
1374
+ for key in self.visual_losses
1375
+ }
1376
+ self.config = config
1377
+
1378
+ def call(self, hidden_states):
1379
+ hidden_states = self.transform(hidden_states)
1380
+ output = {}
1381
+ for key in self.visual_losses:
1382
+ output[key] = self.decoder_dict[key](hidden_states)
1383
+ return output
1384
+
1385
+ def build(self, input_shape=None):
1386
+ if self.built:
1387
+ return
1388
+ self.built = True
1389
+ if getattr(self, "transform", None) is not None:
1390
+ with tf.name_scope(self.transform.name):
1391
+ self.transform.build(None)
1392
+ if getattr(self, "decoder_dict", None) is not None:
1393
+ for layer in self.decoder_dict.values():
1394
+ with tf.name_scope(layer.name):
1395
+ layer.build([None, None, self.config.hidden_size])
1396
+
1397
+
1398
+ @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING)
1399
+ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
1400
+ def __init__(self, config, *inputs, **kwargs):
1401
+ super().__init__(config, *inputs, **kwargs)
1402
+
1403
+ self.config = config
1404
+ self.num_qa_labels = config.num_qa_labels
1405
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1406
+
1407
+ # Use of pretraining tasks
1408
+ self.task_mask_lm = config.task_mask_lm
1409
+ self.task_obj_predict = config.task_obj_predict
1410
+ self.task_matched = config.task_matched
1411
+ self.task_qa = config.task_qa
1412
+
1413
+ # Lxmert backbone
1414
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1415
+
1416
+ # Pre-training heads
1417
+ self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls")
1418
+ if self.task_obj_predict:
1419
+ self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head")
1420
+ if self.task_qa:
1421
+ self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head")
1422
+
1423
+ # Loss functions
1424
+ self.loss_fcts = {
1425
+ "l2": keras.losses.Huber(delta=1.0, name="huber_loss"),
1426
+ "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1427
+ "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1428
+ }
1429
+
1430
+ visual_losses = {}
1431
+ if config.visual_obj_loss:
1432
+ visual_losses["obj"] = {
1433
+ "shape": (-1,),
1434
+ "num": config.num_object_labels,
1435
+ "loss": "visn_ce",
1436
+ }
1437
+ if config.visual_attr_loss:
1438
+ visual_losses["attr"] = {
1439
+ "shape": (-1,),
1440
+ "num": config.num_attr_labels,
1441
+ "loss": "visn_ce",
1442
+ }
1443
+ if config.visual_feat_loss:
1444
+ visual_losses["feat"] = {
1445
+ "shape": (-1, config.visual_feat_dim),
1446
+ "num": config.visual_feat_dim,
1447
+ "loss": "l2",
1448
+ }
1449
+ self.visual_losses = visual_losses
1450
+
1451
+ @property
1452
+ def dummy_inputs(self):
1453
+ """
1454
+ Dummy inputs to build the network.
1455
+
1456
+ Returns:
1457
+ tf.Tensor with dummy inputs
1458
+ """
1459
+ batch_size = 2
1460
+ num_visual_features = 10
1461
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
1462
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
1463
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
1464
+
1465
+ if self.config.task_obj_predict:
1466
+ obj_labels = {}
1467
+ if self.config.visual_attr_loss and self.config.task_obj_predict:
1468
+ obj_labels["attr"] = (
1469
+ tf.ones([batch_size, num_visual_features]),
1470
+ tf.ones([batch_size, num_visual_features]),
1471
+ )
1472
+ if self.config.visual_feat_loss and self.config.task_obj_predict:
1473
+ obj_labels["feat"] = (
1474
+ tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),
1475
+ tf.ones([batch_size, num_visual_features]),
1476
+ )
1477
+ if self.config.visual_obj_loss and self.config.task_obj_predict:
1478
+ obj_labels["obj"] = (
1479
+ tf.ones([batch_size, num_visual_features]),
1480
+ tf.ones([batch_size, num_visual_features]),
1481
+ )
1482
+
1483
+ return {
1484
+ **{
1485
+ "input_ids": input_ids,
1486
+ "visual_feats": visual_feats,
1487
+ "visual_pos": visual_pos,
1488
+ },
1489
+ **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}),
1490
+ }
1491
+
1492
+ def get_lm_head(self):
1493
+ return self.cls.predictions
1494
+
1495
+ def get_prefix_bias_name(self):
1496
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1497
+ return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name
1498
+
1499
+ @unpack_inputs
1500
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1501
+ @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1502
+ def call(
1503
+ self,
1504
+ input_ids: TFModelInputType | None = None,
1505
+ visual_feats: tf.Tensor | None = None,
1506
+ visual_pos: tf.Tensor | None = None,
1507
+ attention_mask: tf.Tensor | None = None,
1508
+ visual_attention_mask: tf.Tensor | None = None,
1509
+ token_type_ids: tf.Tensor | None = None,
1510
+ inputs_embeds: tf.Tensor | None = None,
1511
+ masked_lm_labels: tf.Tensor | None = None,
1512
+ obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None,
1513
+ matched_label: tf.Tensor | None = None,
1514
+ ans: tf.Tensor | None = None,
1515
+ output_attentions: bool | None = None,
1516
+ output_hidden_states: bool | None = None,
1517
+ return_dict: bool | None = None,
1518
+ training: bool = False,
1519
+ ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput:
1520
+ r"""
1521
+ masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1522
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1523
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1524
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1525
+ obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):
1526
+ each key is named after one of the visual losses, and each element of the tuple is of shape
1527
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and
1528
+ the label scores, respectively
1529
+ matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1530
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
1531
+ should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1532
+
1533
+ - 0 indicates that the sentence does not match the image,
1534
+ - 1 indicates that the sentence does match the image.
1535
+ ans (`tf.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`):
1536
+ a one-hot representation of the correct answer
1537
+
1538
+ Returns:
1539
+ """
1540
+
1541
+ lxmert_output = self.lxmert(
1542
+ input_ids,
1543
+ visual_feats,
1544
+ visual_pos,
1545
+ attention_mask,
1546
+ visual_attention_mask,
1547
+ token_type_ids,
1548
+ inputs_embeds,
1549
+ output_attentions,
1550
+ output_hidden_states,
1551
+ return_dict,
1552
+ training,
1553
+ )
1554
+
1555
+ lang_output, visual_output, pooled_output = (
1556
+ lxmert_output[0],
1557
+ lxmert_output[1],
1558
+ lxmert_output[2],
1559
+ )
1560
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1561
+ if self.task_qa:
1562
+ answer_score = self.answer_head(pooled_output)
1563
+ else:
1564
+ answer_score = pooled_output[0][0]
1565
+
1566
+ total_loss = (
1567
+ None
1568
+ if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
1569
+ else tf.constant(0.0)
1570
+ )
1571
+ losses = ()
1572
+ if masked_lm_labels is not None and self.task_mask_lm:
1573
+ masked_lm_loss = self.loss_fcts["ce"](
1574
+ tf.reshape(masked_lm_labels, [-1]),
1575
+ tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),
1576
+ )
1577
+ total_loss += masked_lm_loss
1578
+ losses += (masked_lm_loss,)
1579
+ if matched_label is not None and self.task_matched:
1580
+ matched_loss = self.loss_fcts["ce"](
1581
+ tf.reshape(matched_label, [-1]),
1582
+ tf.reshape(cross_relationship_score, [-1, 2]),
1583
+ )
1584
+ total_loss += matched_loss
1585
+ losses += (matched_loss,)
1586
+ if obj_labels is not None and self.task_obj_predict:
1587
+ total_visn_loss = 0.0
1588
+ visn_prediction_scores_dict = self.obj_predict_head(visual_output)
1589
+ for key, key_info in self.visual_losses.items():
1590
+ label, mask_conf = obj_labels[key]
1591
+ output_dim = key_info["num"]
1592
+ loss_fct_name = key_info["loss"]
1593
+ label_shape = key_info["shape"]
1594
+ weight = self.visual_loss_normalizer
1595
+ visn_loss_fct = self.loss_fcts[loss_fct_name]
1596
+ visn_prediction_scores = visn_prediction_scores_dict[key]
1597
+ visn_loss = visn_loss_fct(
1598
+ tf.reshape(label, label_shape),
1599
+ tf.reshape(visn_prediction_scores, [-1, output_dim]),
1600
+ )
1601
+
1602
+ if visn_loss.ndim > 1: # Regression Losses
1603
+ visn_loss = tf.reduce_mean(visn_loss)
1604
+ visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight
1605
+ total_visn_loss += visn_loss
1606
+ losses += (visn_loss,)
1607
+ total_loss += total_visn_loss
1608
+ if ans is not None and self.task_qa:
1609
+ answer_loss = self.loss_fcts["ce"](
1610
+ tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])
1611
+ )
1612
+ # exclude "*2" here to match the effect of QA losses.
1613
+ # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
1614
+ # Now : (loss *1) for 12 epochs
1615
+ #
1616
+ # * 2 # Multiply by 2 because > half of the data will not have label
1617
+ total_loss += answer_loss
1618
+ losses += (answer_loss,)
1619
+ # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()
1620
+
1621
+ if not return_dict:
1622
+ output = (
1623
+ lang_prediction_scores,
1624
+ cross_relationship_score,
1625
+ answer_score,
1626
+ ) + lxmert_output[3:]
1627
+ return ((total_loss,) + output) if total_loss is not None else output
1628
+
1629
+ return TFLxmertForPreTrainingOutput(
1630
+ loss=total_loss,
1631
+ prediction_logits=lang_prediction_scores,
1632
+ cross_relationship_score=cross_relationship_score,
1633
+ question_answering_score=answer_score,
1634
+ language_hidden_states=lxmert_output.language_hidden_states,
1635
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1636
+ language_attentions=lxmert_output.language_attentions,
1637
+ vision_attentions=lxmert_output.vision_attentions,
1638
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1639
+ )
1640
+
1641
+ def build(self, input_shape=None):
1642
+ if self.built:
1643
+ return
1644
+ self.built = True
1645
+ if getattr(self, "lxmert", None) is not None:
1646
+ with tf.name_scope(self.lxmert.name):
1647
+ self.lxmert.build(None)
1648
+ if getattr(self, "cls", None) is not None:
1649
+ with tf.name_scope(self.cls.name):
1650
+ self.cls.build(None)
1651
+ if getattr(self, "obj_predict_head", None) is not None:
1652
+ with tf.name_scope(self.obj_predict_head.name):
1653
+ self.obj_predict_head.build(None)
1654
+ if getattr(self, "answer_head", None) is not None:
1655
+ with tf.name_scope(self.answer_head.name):
1656
+ self.answer_head.build(None)
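
The pretraining head above accumulates one loss term per enabled objective (masked LM, cross-modality matching, visual-feature prediction, QA), each computed on flattened labels and logits. The snippet below is a minimal, standalone sketch of the masked-LM reshaping pattern only; the tensor shapes, `vocab_size`, and the `SparseCategoricalCrossentropy` object are illustrative assumptions, not the exact loss objects the model stores in `self.loss_fcts`.

```python
# Illustrative sketch of the masked-LM label/logit reshaping used above.
# Shapes, vocab_size, and the loss object are assumptions for demonstration.
import tensorflow as tf

vocab_size = 30522                      # assumed vocabulary size, illustration only
batch_size, seq_len = 2, 8
masked_lm_labels = tf.random.uniform((batch_size, seq_len), 0, vocab_size, dtype=tf.int32)
lang_prediction_scores = tf.random.normal((batch_size, seq_len, vocab_size))

loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
masked_lm_loss = loss_fct(
    tf.reshape(masked_lm_labels, [-1]),                    # (batch*seq,)
    tf.reshape(lang_prediction_scores, [-1, vocab_size]),  # (batch*seq, vocab)
)
total_loss = tf.reduce_mean(masked_lm_loss)                # aggregated before summing with other task losses
print(total_loss.shape)                                    # () -- scalar
```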
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py ADDED
@@ -0,0 +1,503 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
31
+ def load_vocab(vocab_file):
32
+ """Loads a vocabulary file into a dictionary."""
33
+ vocab = collections.OrderedDict()
34
+ with open(vocab_file, "r", encoding="utf-8") as reader:
35
+ tokens = reader.readlines()
36
+ for index, token in enumerate(tokens):
37
+ token = token.rstrip("\n")
38
+ vocab[token] = index
39
+ return vocab
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer
53
+ class LxmertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a Lxmert tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original Lxmert).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_file,
100
+ do_lower_case=True,
101
+ do_basic_tokenize=True,
102
+ never_split=None,
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ tokenize_chinese_chars=True,
109
+ strip_accents=None,
110
+ **kwargs,
111
+ ):
112
+ if not os.path.isfile(vocab_file):
113
+ raise ValueError(
114
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
115
+ " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
116
+ )
117
+ self.vocab = load_vocab(vocab_file)
118
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
119
+ self.do_basic_tokenize = do_basic_tokenize
120
+ if do_basic_tokenize:
121
+ self.basic_tokenizer = BasicTokenizer(
122
+ do_lower_case=do_lower_case,
123
+ never_split=never_split,
124
+ tokenize_chinese_chars=tokenize_chinese_chars,
125
+ strip_accents=strip_accents,
126
+ )
127
+
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ def do_lower_case(self):
146
+ return self.basic_tokenizer.do_lower_case
147
+
148
+ @property
149
+ def vocab_size(self):
150
+ return len(self.vocab)
151
+
152
+ def get_vocab(self):
153
+ return dict(self.vocab, **self.added_tokens_encoder)
154
+
155
+ def _tokenize(self, text, split_special_tokens=False):
156
+ split_tokens = []
157
+ if self.do_basic_tokenize:
158
+ for token in self.basic_tokenizer.tokenize(
159
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
160
+ ):
161
+ # If the token is part of the never_split set
162
+ if token in self.basic_tokenizer.never_split:
163
+ split_tokens.append(token)
164
+ else:
165
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
166
+ else:
167
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
168
+ return split_tokens
169
+
170
+ def _convert_token_to_id(self, token):
171
+ """Converts a token (str) in an id using the vocab."""
172
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
173
+
174
+ def _convert_id_to_token(self, index):
175
+ """Converts an index (integer) in a token (str) using the vocab."""
176
+ return self.ids_to_tokens.get(index, self.unk_token)
177
+
178
+ def convert_tokens_to_string(self, tokens):
179
+ """Converts a sequence of tokens (string) in a single string."""
180
+ out_string = " ".join(tokens).replace(" ##", "").strip()
181
+ return out_string
182
+
183
+ def build_inputs_with_special_tokens(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
188
+ adding special tokens. A Lxmert sequence has the following format:
189
+
190
+ - single sequence: `[CLS] X [SEP]`
191
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs to which the special tokens will be added.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+
199
+ Returns:
200
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
201
+ """
202
+ if token_ids_1 is None:
203
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
204
+ cls = [self.cls_token_id]
205
+ sep = [self.sep_token_id]
206
+ return cls + token_ids_0 + sep + token_ids_1 + sep
207
+
208
+ def get_special_tokens_mask(
209
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
210
+ ) -> List[int]:
211
+ """
212
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
213
+ special tokens using the tokenizer `prepare_for_model` method.
214
+
215
+ Args:
216
+ token_ids_0 (`List[int]`):
217
+ List of IDs.
218
+ token_ids_1 (`List[int]`, *optional*):
219
+ Optional second list of IDs for sequence pairs.
220
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the token list is already formatted with special tokens for the model.
222
+
223
+ Returns:
224
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
225
+ """
226
+
227
+ if already_has_special_tokens:
228
+ return super().get_special_tokens_mask(
229
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
230
+ )
231
+
232
+ if token_ids_1 is not None:
233
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
234
+ return [1] + ([0] * len(token_ids_0)) + [1]
235
+
236
+ def create_token_type_ids_from_sequences(
237
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
238
+ ) -> List[int]:
239
+ """
240
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
241
+ pair mask has the following format:
242
+
243
+ ```
244
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
245
+ | first sequence | second sequence |
246
+ ```
247
+
248
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+
256
+ Returns:
257
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
258
+ """
259
+ sep = [self.sep_token_id]
260
+ cls = [self.cls_token_id]
261
+ if token_ids_1 is None:
262
+ return len(cls + token_ids_0 + sep) * [0]
263
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
264
+
265
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
266
+ index = 0
267
+ if os.path.isdir(save_directory):
268
+ vocab_file = os.path.join(
269
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
270
+ )
271
+ else:
272
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
273
+ with open(vocab_file, "w", encoding="utf-8") as writer:
274
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
275
+ if index != token_index:
276
+ logger.warning(
277
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
278
+ " Please check that the vocabulary is not corrupted!"
279
+ )
280
+ index = token_index
281
+ writer.write(token + "\n")
282
+ index += 1
283
+ return (vocab_file,)
284
+
285
+
286
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
287
+ class BasicTokenizer(object):
288
+ """
289
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
290
+
291
+ Args:
292
+ do_lower_case (`bool`, *optional*, defaults to `True`):
293
+ Whether or not to lowercase the input when tokenizing.
294
+ never_split (`Iterable`, *optional*):
295
+ Collection of tokens which will never be split during tokenization. Only has an effect when
296
+ `do_basic_tokenize=True`
297
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
298
+ Whether or not to tokenize Chinese characters.
299
+
300
+ This should likely be deactivated for Japanese (see this
301
+ [issue](https://github.com/huggingface/transformers/issues/328)).
302
+ strip_accents (`bool`, *optional*):
303
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
304
+ value for `lowercase` (as in the original BERT).
305
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
306
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
307
+ the full context of the words, such as contractions.
308
+ """
309
+
310
+ def __init__(
311
+ self,
312
+ do_lower_case=True,
313
+ never_split=None,
314
+ tokenize_chinese_chars=True,
315
+ strip_accents=None,
316
+ do_split_on_punc=True,
317
+ ):
318
+ if never_split is None:
319
+ never_split = []
320
+ self.do_lower_case = do_lower_case
321
+ self.never_split = set(never_split)
322
+ self.tokenize_chinese_chars = tokenize_chinese_chars
323
+ self.strip_accents = strip_accents
324
+ self.do_split_on_punc = do_split_on_punc
325
+
326
+ def tokenize(self, text, never_split=None):
327
+ """
328
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
329
+
330
+ Args:
331
+ never_split (`List[str]`, *optional*)
332
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
333
+ [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
334
+ """
335
+ # union() returns a new set by concatenating the two sets.
336
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
337
+ text = self._clean_text(text)
338
+
339
+ # This was added on November 1st, 2018 for the multilingual and Chinese
340
+ # models. This is also applied to the English models now, but it doesn't
341
+ # matter since the English models were not trained on any Chinese data
342
+ # and generally don't have any Chinese data in them (there are Chinese
343
+ # characters in the vocabulary because Wikipedia does have some Chinese
344
+ # words in the English Wikipedia.).
345
+ if self.tokenize_chinese_chars:
346
+ text = self._tokenize_chinese_chars(text)
347
+ # prevents treating the same character with different unicode codepoints as different characters
348
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
349
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
350
+ split_tokens = []
351
+ for token in orig_tokens:
352
+ if token not in never_split:
353
+ if self.do_lower_case:
354
+ token = token.lower()
355
+ if self.strip_accents is not False:
356
+ token = self._run_strip_accents(token)
357
+ elif self.strip_accents:
358
+ token = self._run_strip_accents(token)
359
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
360
+
361
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
362
+ return output_tokens
363
+
364
+ def _run_strip_accents(self, text):
365
+ """Strips accents from a piece of text."""
366
+ text = unicodedata.normalize("NFD", text)
367
+ output = []
368
+ for char in text:
369
+ cat = unicodedata.category(char)
370
+ if cat == "Mn":
371
+ continue
372
+ output.append(char)
373
+ return "".join(output)
374
+
375
+ def _run_split_on_punc(self, text, never_split=None):
376
+ """Splits punctuation on a piece of text."""
377
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
378
+ return [text]
379
+ chars = list(text)
380
+ i = 0
381
+ start_new_word = True
382
+ output = []
383
+ while i < len(chars):
384
+ char = chars[i]
385
+ if _is_punctuation(char):
386
+ output.append([char])
387
+ start_new_word = True
388
+ else:
389
+ if start_new_word:
390
+ output.append([])
391
+ start_new_word = False
392
+ output[-1].append(char)
393
+ i += 1
394
+
395
+ return ["".join(x) for x in output]
396
+
397
+ def _tokenize_chinese_chars(self, text):
398
+ """Adds whitespace around any CJK character."""
399
+ output = []
400
+ for char in text:
401
+ cp = ord(char)
402
+ if self._is_chinese_char(cp):
403
+ output.append(" ")
404
+ output.append(char)
405
+ output.append(" ")
406
+ else:
407
+ output.append(char)
408
+ return "".join(output)
409
+
410
+ def _is_chinese_char(self, cp):
411
+ """Checks whether CP is the codepoint of a CJK character."""
412
+ # This defines a "chinese character" as anything in the CJK Unicode block:
413
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
414
+ #
415
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
416
+ # despite its name. The modern Korean Hangul alphabet is a different block,
417
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
418
+ # space-separated words, so they are not treated specially and handled
419
+ like all of the other languages.
420
+ if (
421
+ (cp >= 0x4E00 and cp <= 0x9FFF)
422
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
423
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
424
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
425
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
426
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
427
+ or (cp >= 0xF900 and cp <= 0xFAFF)
428
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
429
+ ): #
430
+ return True
431
+
432
+ return False
433
+
434
+ def _clean_text(self, text):
435
+ """Performs invalid character removal and whitespace cleanup on text."""
436
+ output = []
437
+ for char in text:
438
+ cp = ord(char)
439
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
440
+ continue
441
+ if _is_whitespace(char):
442
+ output.append(" ")
443
+ else:
444
+ output.append(char)
445
+ return "".join(output)
446
+
447
+
448
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
449
+ class WordpieceTokenizer(object):
450
+ """Runs WordPiece tokenization."""
451
+
452
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
453
+ self.vocab = vocab
454
+ self.unk_token = unk_token
455
+ self.max_input_chars_per_word = max_input_chars_per_word
456
+
457
+ def tokenize(self, text):
458
+ """
459
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
460
+ tokenization using the given vocabulary.
461
+
462
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
463
+
464
+ Args:
465
+ text: A single token or whitespace separated tokens. This should have
466
+ already been passed through *BasicTokenizer*.
467
+
468
+ Returns:
469
+ A list of wordpiece tokens.
470
+ """
471
+
472
+ output_tokens = []
473
+ for token in whitespace_tokenize(text):
474
+ chars = list(token)
475
+ if len(chars) > self.max_input_chars_per_word:
476
+ output_tokens.append(self.unk_token)
477
+ continue
478
+
479
+ is_bad = False
480
+ start = 0
481
+ sub_tokens = []
482
+ while start < len(chars):
483
+ end = len(chars)
484
+ cur_substr = None
485
+ while start < end:
486
+ substr = "".join(chars[start:end])
487
+ if start > 0:
488
+ substr = "##" + substr
489
+ if substr in self.vocab:
490
+ cur_substr = substr
491
+ break
492
+ end -= 1
493
+ if cur_substr is None:
494
+ is_bad = True
495
+ break
496
+ sub_tokens.append(cur_substr)
497
+ start = end
498
+
499
+ if is_bad:
500
+ output_tokens.append(self.unk_token)
501
+ else:
502
+ output_tokens.extend(sub_tokens)
503
+ return output_tokens
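
As a quick illustration of the greedy longest-match-first behaviour documented in `WordpieceTokenizer.tokenize`, the sketch below runs the class directly against a tiny hand-built vocabulary. The toy vocabulary contents are the only assumption; a real checkpoint loads its vocabulary from `vocab.txt`, and the import path follows this file's location in the package.

```python
# Minimal sketch: WordPiece greedy longest-match-first on a toy vocabulary.
# The vocabulary below is hand-built for illustration; real checkpoints ship vocab.txt.
from transformers.models.lxmert.tokenization_lxmert import WordpieceTokenizer

toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))   # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyz"))         # ['[UNK]'] -- no sub-token matches the vocabulary
```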
llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py ADDED
@@ -0,0 +1,169 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_lxmert import LxmertTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+
28
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
29
+ class LxmertTokenizerFast(PreTrainedTokenizerFast):
30
+ r"""
31
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
32
+
33
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
34
+ refer to this superclass for more information regarding those methods.
35
+
36
+ Args:
37
+ vocab_file (`str`):
38
+ File containing the vocabulary.
39
+ do_lower_case (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to lowercase the input when tokenizing.
41
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
42
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
43
+ token instead.
44
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
45
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
46
+ sequence classification or for a text and a question for question answering. It is also used as the last
47
+ token of a sequence built with special tokens.
48
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
49
+ The token used for padding, for example when batching sequences of different lengths.
50
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
51
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
52
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
53
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
54
+ The token used for masking values. This is the token used when training this model with masked language
55
+ modeling. This is the token which the model will try to predict.
56
+ clean_text (`bool`, *optional*, defaults to `True`):
57
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
58
+ whitespaces by the classic one.
59
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
61
+ issue](https://github.com/huggingface/transformers/issues/328)).
62
+ strip_accents (`bool`, *optional*):
63
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
64
+ value for `lowercase` (as in the original Lxmert).
65
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
66
+ The prefix for subwords.
67
+ """
68
+
69
+ vocab_files_names = VOCAB_FILES_NAMES
70
+ slow_tokenizer_class = LxmertTokenizer
71
+
72
+ def __init__(
73
+ self,
74
+ vocab_file=None,
75
+ tokenizer_file=None,
76
+ do_lower_case=True,
77
+ unk_token="[UNK]",
78
+ sep_token="[SEP]",
79
+ pad_token="[PAD]",
80
+ cls_token="[CLS]",
81
+ mask_token="[MASK]",
82
+ tokenize_chinese_chars=True,
83
+ strip_accents=None,
84
+ **kwargs,
85
+ ):
86
+ super().__init__(
87
+ vocab_file,
88
+ tokenizer_file=tokenizer_file,
89
+ do_lower_case=do_lower_case,
90
+ unk_token=unk_token,
91
+ sep_token=sep_token,
92
+ pad_token=pad_token,
93
+ cls_token=cls_token,
94
+ mask_token=mask_token,
95
+ tokenize_chinese_chars=tokenize_chinese_chars,
96
+ strip_accents=strip_accents,
97
+ **kwargs,
98
+ )
99
+
100
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
101
+ if (
102
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
103
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
104
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
105
+ ):
106
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
107
+ normalizer_state["lowercase"] = do_lower_case
108
+ normalizer_state["strip_accents"] = strip_accents
109
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
110
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
111
+
112
+ self.do_lower_case = do_lower_case
113
+
114
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
115
+ """
116
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
117
+ adding special tokens. A Lxmert sequence has the following format:
118
+
119
+ - single sequence: `[CLS] X [SEP]`
120
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
121
+
122
+ Args:
123
+ token_ids_0 (`List[int]`):
124
+ List of IDs to which the special tokens will be added.
125
+ token_ids_1 (`List[int]`, *optional*):
126
+ Optional second list of IDs for sequence pairs.
127
+
128
+ Returns:
129
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
130
+ """
131
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
132
+
133
+ if token_ids_1 is not None:
134
+ output += token_ids_1 + [self.sep_token_id]
135
+
136
+ return output
137
+
138
+ def create_token_type_ids_from_sequences(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
143
+ pair mask has the following format:
144
+
145
+ ```
146
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
147
+ | first sequence | second sequence |
148
+ ```
149
+
150
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
160
+ """
161
+ sep = [self.sep_token_id]
162
+ cls = [self.cls_token_id]
163
+ if token_ids_1 is None:
164
+ return len(cls + token_ids_0 + sep) * [0]
165
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
166
+
167
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
168
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
169
+ return tuple(files)
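
Both the slow and fast tokenizers build sequence pairs as `[CLS] A [SEP] B [SEP]`, with token type ids marking `[CLS] A [SEP]` as segment 0 and `B [SEP]` as segment 1. The standalone sketch below reproduces that layout; every token id in it is a made-up value for illustration, not an id from a real vocabulary.

```python
# Standalone sketch of the pair layout produced by build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences. All ids below are made up for illustration.
cls_id, sep_id = 101, 102            # assumed special-token ids
token_ids_0 = [7592, 2088]           # assumed ids for a first sequence
token_ids_1 = [2129, 2024, 2017]     # assumed ids for a second sequence

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

print(input_ids)        # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
print(token_type_ids)   # [0, 0, 0, 0, 1, 1, 1, 1]
```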
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (7.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc ADDED
Binary file (6.51 kB). View file