applied-ai-018 commited on
Commit
8b6ee29
·
verified ·
1 Parent(s): 4ea5aa8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__init__.py +59 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py +304 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py +219 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py +65 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py +161 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py +938 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__init__.py +168 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra_fast.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/configuration_electra.py +199 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py +80 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py +1686 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_flax_electra.py +1601 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_tf_electra.py +1775 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra.py +546 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra_fast.py +231 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__init__.py +44 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/__init__.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/convert_mluke_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/tokenization_mluke.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py +229 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/tokenization_mluke.py +1631 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py +130 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py +117 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/modeling_mpnet.py +1055 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/modeling_tf_mpnet.py +1346 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet.py +546 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet_fast.py +226 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__init__.py +49 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/configuration_timm_backbone.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/modeling_timm_backbone.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/configuration_timm_backbone.py +83 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/modeling_timm_backbone.py +158 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/videomae/__init__.py +75 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__init__.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_barthez"] = ["BarthezTokenizer"]
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"]
37
+
38
+
39
+ if TYPE_CHECKING:
40
+ try:
41
+ if not is_sentencepiece_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .tokenization_barthez import BarthezTokenizer
47
+
48
+ try:
49
+ if not is_tokenizers_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .tokenization_barthez_fast import BarthezTokenizerFast
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (919 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/__pycache__/tokenization_barthez_fast.cpython-310.pyc ADDED
Binary file (7.87 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for the BARThez model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+ PRETRAINED_VOCAB_FILES_MAP = {
33
+ "vocab_file": {
34
+ "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
35
+ "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
36
+ "moussaKam/barthez-orangesum-title": (
37
+ "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
38
+ ),
39
+ },
40
+ }
41
+
42
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
43
+ "moussaKam/mbarthez": 1024,
44
+ "moussaKam/barthez": 1024,
45
+ "moussaKam/barthez-orangesum-title": 1024,
46
+ }
47
+
48
+ SPIECE_UNDERLINE = "▁"
49
+
50
+ # TODO this class is useless. This is the most standard sentencpiece model. Let's find which one is closest and nuke this.
51
+
52
+
53
+ class BarthezTokenizer(PreTrainedTokenizer):
54
+ """
55
+ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a BARThez tokenizer. Based on
56
+ [SentencePiece](https://github.com/google/sentencepiece).
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
59
+ this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
64
+ contains the vocabulary necessary to instantiate a tokenizer.
65
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
66
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
67
+
68
+ <Tip>
69
+
70
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
71
+ sequence. The token used is the `cls_token`.
72
+
73
+ </Tip>
74
+
75
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
76
+ The end of sequence token.
77
+
78
+ <Tip>
79
+
80
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
81
+ The token used is the `sep_token`.
82
+
83
+ </Tip>
84
+
85
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
86
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
87
+ sequence classification or for a text and a question for question answering. It is also used as the last
88
+ token of a sequence built with special tokens.
89
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
90
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
91
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
92
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
93
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
94
+ token instead.
95
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
96
+ The token used for padding, for example when batching sequences of different lengths.
97
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
98
+ The token used for masking values. This is the token used when training this model with masked language
99
+ modeling. This is the token which the model will try to predict.
100
+ sp_model_kwargs (`dict`, *optional*):
101
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
102
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
103
+ to set:
104
+
105
+ - `enable_sampling`: Enable subword regularization.
106
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
107
+
108
+ - `nbest_size = {0,1}`: No sampling is performed.
109
+ - `nbest_size > 1`: samples from the nbest_size results.
110
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
111
+ using forward-filtering-and-backward-sampling algorithm.
112
+
113
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
114
+ BPE-dropout.
115
+
116
+ Attributes:
117
+ sp_model (`SentencePieceProcessor`):
118
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
119
+ """
120
+
121
+ vocab_files_names = VOCAB_FILES_NAMES
122
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
123
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
124
+ model_input_names = ["input_ids", "attention_mask"]
125
+
126
+ def __init__(
127
+ self,
128
+ vocab_file,
129
+ bos_token="<s>",
130
+ eos_token="</s>",
131
+ sep_token="</s>",
132
+ cls_token="<s>",
133
+ unk_token="<unk>",
134
+ pad_token="<pad>",
135
+ mask_token="<mask>",
136
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
137
+ **kwargs,
138
+ ) -> None:
139
+ # Mask token behave like a normal word, i.e. include the space before it. Will have normalized=False by default this way
140
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
141
+
142
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
143
+
144
+ self.vocab_file = vocab_file
145
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
146
+ self.sp_model.Load(str(vocab_file))
147
+ super().__init__(
148
+ bos_token=bos_token,
149
+ eos_token=eos_token,
150
+ unk_token=unk_token,
151
+ sep_token=sep_token,
152
+ cls_token=cls_token,
153
+ pad_token=pad_token,
154
+ mask_token=mask_token,
155
+ sp_model_kwargs=self.sp_model_kwargs,
156
+ **kwargs,
157
+ )
158
+
159
+ def build_inputs_with_special_tokens(
160
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
161
+ ) -> List[int]:
162
+ """
163
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
164
+ adding special tokens. A BARThez sequence has the following format:
165
+
166
+ - single sequence: `<s> X </s>`
167
+ - pair of sequences: `<s> A </s></s> B </s>`
168
+
169
+ Args:
170
+ token_ids_0 (`List[int]`):
171
+ List of IDs to which the special tokens will be added.
172
+ token_ids_1 (`List[int]`, *optional*):
173
+ Optional second list of IDs for sequence pairs.
174
+
175
+ Returns:
176
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
177
+ """
178
+
179
+ if token_ids_1 is None:
180
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
181
+ cls = [self.cls_token_id]
182
+ sep = [self.sep_token_id]
183
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
184
+
185
+ def get_special_tokens_mask(
186
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
187
+ ) -> List[int]:
188
+ """
189
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
190
+ special tokens using the tokenizer `prepare_for_model` method.
191
+
192
+ Args:
193
+ token_ids_0 (`List[int]`):
194
+ List of IDs.
195
+ token_ids_1 (`List[int]`, *optional*):
196
+ Optional second list of IDs for sequence pairs.
197
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
198
+ Whether or not the token list is already formatted with special tokens for the model.
199
+
200
+ Returns:
201
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
202
+ """
203
+ if already_has_special_tokens:
204
+ return super().get_special_tokens_mask(
205
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
206
+ )
207
+
208
+ if token_ids_1 is None:
209
+ return [1] + ([0] * len(token_ids_0)) + [1]
210
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
211
+
212
+ def create_token_type_ids_from_sequences(
213
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
214
+ ) -> List[int]:
215
+ """
216
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
217
+
218
+ Args:
219
+ token_ids_0 (`List[int]`):
220
+ List of IDs.
221
+ token_ids_1 (`List[int]`, *optional*):
222
+ Optional second list of IDs for sequence pairs.
223
+
224
+ Returns:
225
+ `List[int]`: List of zeros.
226
+ """
227
+ sep = [self.sep_token_id]
228
+ cls = [self.cls_token_id]
229
+
230
+ if token_ids_1 is None:
231
+ return len(cls + token_ids_0 + sep) * [0]
232
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
233
+
234
+ @property
235
+ def vocab_size(self):
236
+ return len(self.sp_model)
237
+
238
+ def get_vocab(self):
239
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
240
+ vocab.update(self.added_tokens_encoder)
241
+ return vocab
242
+
243
+ def _tokenize(self, text: str) -> List[str]:
244
+ return self.sp_model.encode(text, out_type=str)
245
+
246
+ def _convert_token_to_id(self, token):
247
+ """Converts a token (str) in an id using the vocab."""
248
+ return self.sp_model.PieceToId(token)
249
+
250
+ def _convert_id_to_token(self, index):
251
+ """Converts an index (integer) in a token (str) using the vocab."""
252
+ return self.sp_model.IdToPiece(index)
253
+
254
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
255
+ def convert_tokens_to_string(self, tokens):
256
+ """Converts a sequence of tokens (string) in a single string."""
257
+ current_sub_tokens = []
258
+ out_string = ""
259
+ prev_is_special = False
260
+ for token in tokens:
261
+ # make sure that special tokens are not decoded using sentencepiece model
262
+ if token in self.all_special_tokens:
263
+ if not prev_is_special:
264
+ out_string += " "
265
+ out_string += self.sp_model.decode(current_sub_tokens) + token
266
+ prev_is_special = True
267
+ current_sub_tokens = []
268
+ else:
269
+ current_sub_tokens.append(token)
270
+ prev_is_special = False
271
+ out_string += self.sp_model.decode(current_sub_tokens)
272
+ return out_string.strip()
273
+
274
+ def __getstate__(self):
275
+ state = self.__dict__.copy()
276
+ state["sp_model"] = None
277
+ return state
278
+
279
+ def __setstate__(self, d):
280
+ self.__dict__ = d
281
+
282
+ # for backward compatibility
283
+ if not hasattr(self, "sp_model_kwargs"):
284
+ self.sp_model_kwargs = {}
285
+
286
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
287
+ self.sp_model.Load(self.vocab_file)
288
+
289
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
290
+ if not os.path.isdir(save_directory):
291
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
292
+ return
293
+ out_vocab_file = os.path.join(
294
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
295
+ )
296
+
297
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
298
+ copyfile(self.vocab_file, out_vocab_file)
299
+ elif not os.path.isfile(self.vocab_file):
300
+ with open(out_vocab_file, "wb") as fi:
301
+ content_spiece_model = self.sp_model.serialized_model_proto()
302
+ fi.write(content_spiece_model)
303
+
304
+ return (out_vocab_file,)
env-llmeval/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for the BARThez model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_barthez import BarthezTokenizer
29
+ else:
30
+ BarthezTokenizer = None
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+ PRETRAINED_VOCAB_FILES_MAP = {
37
+ "vocab_file": {
38
+ "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
39
+ "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
40
+ "moussaKam/barthez-orangesum-title": (
41
+ "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
42
+ ),
43
+ },
44
+ "tokenizer_file": {
45
+ "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
46
+ "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
47
+ "moussaKam/barthez-orangesum-title": (
48
+ "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
49
+ ),
50
+ },
51
+ }
52
+
53
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
54
+ "moussaKam/mbarthez": 1024,
55
+ "moussaKam/barthez": 1024,
56
+ "moussaKam/barthez-orangesum-title": 1024,
57
+ }
58
+
59
+ SPIECE_UNDERLINE = "▁"
60
+
61
+
62
+ class BarthezTokenizerFast(PreTrainedTokenizerFast):
63
+ """
64
+ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
65
+ [SentencePiece](https://github.com/google/sentencepiece).
66
+
67
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
68
+ refer to this superclass for more information regarding those methods.
69
+
70
+ Args:
71
+ vocab_file (`str`):
72
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
73
+ contains the vocabulary necessary to instantiate a tokenizer.
74
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
75
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
80
+ sequence. The token used is the `cls_token`.
81
+
82
+ </Tip>
83
+
84
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The end of sequence token.
86
+
87
+ <Tip>
88
+
89
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
90
+ The token used is the `sep_token`.
91
+
92
+ </Tip>
93
+
94
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
95
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
96
+ sequence classification or for a text and a question for question answering. It is also used as the last
97
+ token of a sequence built with special tokens.
98
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
99
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
100
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
101
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
102
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
103
+ token instead.
104
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
105
+ The token used for padding, for example when batching sequences of different lengths.
106
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
107
+ The token used for masking values. This is the token used when training this model with masked language
108
+ modeling. This is the token which the model will try to predict.
109
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
110
+ Additional special tokens used by the tokenizer.
111
+ """
112
+
113
+ vocab_files_names = VOCAB_FILES_NAMES
114
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
115
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
116
+ model_input_names = ["input_ids", "attention_mask"]
117
+ slow_tokenizer_class = BarthezTokenizer
118
+
119
+ def __init__(
120
+ self,
121
+ vocab_file=None,
122
+ tokenizer_file=None,
123
+ bos_token="<s>",
124
+ eos_token="</s>",
125
+ sep_token="</s>",
126
+ cls_token="<s>",
127
+ unk_token="<unk>",
128
+ pad_token="<pad>",
129
+ mask_token="<mask>",
130
+ **kwargs,
131
+ ):
132
+ # Mask token behave like a normal word, i.e. include the space before it
133
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
134
+
135
+ super().__init__(
136
+ vocab_file,
137
+ tokenizer_file=tokenizer_file,
138
+ bos_token=bos_token,
139
+ eos_token=eos_token,
140
+ unk_token=unk_token,
141
+ sep_token=sep_token,
142
+ cls_token=cls_token,
143
+ pad_token=pad_token,
144
+ mask_token=mask_token,
145
+ **kwargs,
146
+ )
147
+
148
+ self.vocab_file = vocab_file
149
+
150
+ @property
151
+ def can_save_slow_tokenizer(self) -> bool:
152
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
153
+
154
+ def build_inputs_with_special_tokens(
155
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
156
+ ) -> List[int]:
157
+ """
158
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
159
+ adding special tokens. A BARThez sequence has the following format:
160
+
161
+ - single sequence: `<s> X </s>`
162
+ - pair of sequences: `<s> A </s></s> B </s>`
163
+
164
+ Args:
165
+ token_ids_0 (`List[int]`):
166
+ List of IDs to which the special tokens will be added.
167
+ token_ids_1 (`List[int]`, *optional*):
168
+ Optional second list of IDs for sequence pairs.
169
+
170
+ Returns:
171
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
172
+ """
173
+
174
+ if token_ids_1 is None:
175
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
176
+ cls = [self.cls_token_id]
177
+ sep = [self.sep_token_id]
178
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
179
+
180
+ def create_token_type_ids_from_sequences(
181
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
182
+ ) -> List[int]:
183
+ """
184
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
185
+
186
+ Args:
187
+ token_ids_0 (`List[int]`):
188
+ List of IDs.
189
+ token_ids_1 (`List[int]`, *optional*):
190
+ Optional second list of IDs for sequence pairs.
191
+
192
+ Returns:
193
+ `List[int]`: List of zeros.
194
+ """
195
+ sep = [self.sep_token_id]
196
+ cls = [self.cls_token_id]
197
+
198
+ if token_ids_1 is None:
199
+ return len(cls + token_ids_0 + sep) * [0]
200
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
201
+
202
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
203
+ if not self.can_save_slow_tokenizer:
204
+ raise ValueError(
205
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
206
+ "tokenizer."
207
+ )
208
+
209
+ if not os.path.isdir(save_directory):
210
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
211
+ return
212
+ out_vocab_file = os.path.join(
213
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
214
+ )
215
+
216
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
217
+ copyfile(self.vocab_file, out_vocab_file)
218
+
219
+ return (out_vocab_file,)
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/__init__.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_decision_transformer": [
21
+ "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "DecisionTransformerConfig",
23
+ ],
24
+ }
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_decision_transformer"] = [
33
+ "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
34
+ "DecisionTransformerGPT2Model",
35
+ "DecisionTransformerGPT2PreTrainedModel",
36
+ "DecisionTransformerModel",
37
+ "DecisionTransformerPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_decision_transformer import (
43
+ DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
44
+ DecisionTransformerConfig,
45
+ )
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .modeling_decision_transformer import (
54
+ DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
55
+ DecisionTransformerGPT2Model,
56
+ DecisionTransformerGPT2PreTrainedModel,
57
+ DecisionTransformerModel,
58
+ DecisionTransformerPreTrainedModel,
59
+ )
60
+
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/configuration_decision_transformer.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Decision Transformer model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+ DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
24
+ "edbeeching/decision-transformer-gym-hopper-medium": (
25
+ "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
26
+ ),
27
+ # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
28
+ }
29
+
30
+
31
+ class DecisionTransformerConfig(PretrainedConfig):
32
+ """
33
+ This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
34
+ instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
36
+ DecisionTransformer architecture. Many of the config options are used to instatiate the GPT2 model that is used as
37
+ part of the architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+
43
+ Args:
44
+ state_dim (`int`, *optional*, defaults to 17):
45
+ The state size for the RL environment
46
+ act_dim (`int`, *optional*, defaults to 4):
47
+ The size of the output action space
48
+ hidden_size (`int`, *optional*, defaults to 128):
49
+ The size of the hidden layers
50
+ max_ep_len (`int`, *optional*, defaults to 4096):
51
+ The maximum length of an episode in the environment
52
+ action_tanh (`bool`, *optional*, defaults to True):
53
+ Whether to use a tanh activation on action prediction
54
+ vocab_size (`int`, *optional*, defaults to 50257):
55
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
56
+ `inputs_ids` passed when calling [`DecisionTransformerModel`].
57
+ n_positions (`int`, *optional*, defaults to 1024):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ n_layer (`int`, *optional*, defaults to 3):
61
+ Number of hidden layers in the Transformer encoder.
62
+ n_head (`int`, *optional*, defaults to 1):
63
+ Number of attention heads for each attention layer in the Transformer encoder.
64
+ n_inner (`int`, *optional*):
65
+ Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
66
+ activation_function (`str`, *optional*, defaults to `"gelu"`):
67
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
68
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
69
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
70
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
71
+ The dropout ratio for the embeddings.
72
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
73
+ The dropout ratio for the attention.
74
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
75
+ The epsilon to use in the layer normalization layers.
76
+ initializer_range (`float`, *optional*, defaults to 0.02):
77
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
78
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
79
+ Scale attention weights by dividing by sqrt(hidden_size)..
80
+ use_cache (`bool`, *optional*, defaults to `True`):
81
+ Whether or not the model should return the last key/values attentions (not used by all models).
82
+ scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
83
+ Whether to additionally scale attention weights by `1 / layer_idx + 1`.
84
+ reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
85
+ Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
86
+ dot-product/softmax to float() when training with mixed precision.
87
+
88
+ Example:
89
+
90
+ ```python
91
+ >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel
92
+
93
+ >>> # Initializing a DecisionTransformer configuration
94
+ >>> configuration = DecisionTransformerConfig()
95
+
96
+ >>> # Initializing a model (with random weights) from the configuration
97
+ >>> model = DecisionTransformerModel(configuration)
98
+
99
+ >>> # Accessing the model configuration
100
+ >>> configuration = model.config
101
+ ```"""
102
+
103
+ model_type = "decision_transformer"
104
+ keys_to_ignore_at_inference = ["past_key_values"]
105
+ attribute_map = {
106
+ "max_position_embeddings": "n_positions",
107
+ "num_attention_heads": "n_head",
108
+ "num_hidden_layers": "n_layer",
109
+ }
110
+
111
+ def __init__(
112
+ self,
113
+ state_dim=17,
114
+ act_dim=4,
115
+ hidden_size=128,
116
+ max_ep_len=4096,
117
+ action_tanh=True,
118
+ vocab_size=1,
119
+ n_positions=1024,
120
+ n_layer=3,
121
+ n_head=1,
122
+ n_inner=None,
123
+ activation_function="relu",
124
+ resid_pdrop=0.1,
125
+ embd_pdrop=0.1,
126
+ attn_pdrop=0.1,
127
+ layer_norm_epsilon=1e-5,
128
+ initializer_range=0.02,
129
+ scale_attn_weights=True,
130
+ use_cache=True,
131
+ bos_token_id=50256,
132
+ eos_token_id=50256,
133
+ scale_attn_by_inverse_layer_idx=False,
134
+ reorder_and_upcast_attn=False,
135
+ **kwargs,
136
+ ):
137
+ self.state_dim = state_dim
138
+ self.act_dim = act_dim
139
+ self.hidden_size = hidden_size
140
+ self.max_ep_len = max_ep_len
141
+ self.action_tanh = action_tanh
142
+ self.vocab_size = vocab_size
143
+ self.n_positions = n_positions
144
+ self.n_layer = n_layer
145
+ self.n_head = n_head
146
+ self.n_inner = n_inner
147
+ self.activation_function = activation_function
148
+ self.resid_pdrop = resid_pdrop
149
+ self.embd_pdrop = embd_pdrop
150
+ self.attn_pdrop = attn_pdrop
151
+ self.layer_norm_epsilon = layer_norm_epsilon
152
+ self.initializer_range = initializer_range
153
+ self.scale_attn_weights = scale_attn_weights
154
+ self.use_cache = use_cache
155
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
156
+ self.reorder_and_upcast_attn = reorder_and_upcast_attn
157
+
158
+ self.bos_token_id = bos_token_id
159
+ self.eos_token_id = eos_token_id
160
+
161
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
env-llmeval/lib/python3.10/site-packages/transformers/models/decision_transformer/modeling_decision_transformer.py ADDED
@@ -0,0 +1,938 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DecisionTransformer model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.cuda.amp import autocast
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
31
+ from ...utils import (
32
+ ModelOutput,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_decision_transformer import DecisionTransformerConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
44
+ _CONFIG_FOR_DOC = "DecisionTransformerConfig"
45
+
46
+ DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
47
+ "edbeeching/decision-transformer-gym-hopper-medium",
48
+ # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
49
+ ]
50
+
51
+
52
+ # Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
53
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
54
+ """Load tf checkpoints in a pytorch model"""
55
+ try:
56
+ import re
57
+
58
+ import tensorflow as tf
59
+ except ImportError:
60
+ logger.error(
61
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
62
+ "https://www.tensorflow.org/install/ for installation instructions."
63
+ )
64
+ raise
65
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
66
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
67
+ # Load weights from TF model
68
+ init_vars = tf.train.list_variables(tf_path)
69
+ names = []
70
+ arrays = []
71
+ for name, shape in init_vars:
72
+ logger.info(f"Loading TF weight {name} with shape {shape}")
73
+ array = tf.train.load_variable(tf_path, name)
74
+ names.append(name)
75
+ arrays.append(array.squeeze())
76
+
77
+ for name, array in zip(names, arrays):
78
+ name = name[6:] # skip "model/"
79
+ name = name.split("/")
80
+ pointer = model
81
+ for m_name in name:
82
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
83
+ scope_names = re.split(r"(\d+)", m_name)
84
+ else:
85
+ scope_names = [m_name]
86
+ if scope_names[0] == "w" or scope_names[0] == "g":
87
+ pointer = getattr(pointer, "weight")
88
+ elif scope_names[0] == "b":
89
+ pointer = getattr(pointer, "bias")
90
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
91
+ pointer = getattr(pointer, scope_names[0])
92
+ pointer = getattr(pointer, "weight")
93
+ else:
94
+ pointer = getattr(pointer, scope_names[0])
95
+ if len(scope_names) >= 2:
96
+ num = int(scope_names[1])
97
+ pointer = pointer[num]
98
+ try:
99
+ if pointer.shape != array.shape:
100
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
101
+ except ValueError as e:
102
+ e.args += (pointer.shape, array.shape)
103
+ raise
104
+ logger.info(f"Initialize PyTorch weight {name}")
105
+ pointer.data = torch.from_numpy(array)
106
+ return model
107
+
108
+
109
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
110
+ class DecisionTransformerGPT2Attention(nn.Module):
111
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
112
+ super().__init__()
113
+
114
+ max_positions = config.max_position_embeddings
115
+ self.register_buffer(
116
+ "bias",
117
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
118
+ 1, 1, max_positions, max_positions
119
+ ),
120
+ persistent=False,
121
+ )
122
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
123
+
124
+ self.embed_dim = config.hidden_size
125
+ self.num_heads = config.num_attention_heads
126
+ self.head_dim = self.embed_dim // self.num_heads
127
+ self.split_size = self.embed_dim
128
+ if self.head_dim * self.num_heads != self.embed_dim:
129
+ raise ValueError(
130
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
131
+ f" {self.num_heads})."
132
+ )
133
+
134
+ self.scale_attn_weights = config.scale_attn_weights
135
+ self.is_cross_attention = is_cross_attention
136
+
137
+ # Layer-wise attention scaling, reordering, and upcasting
138
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
139
+ self.layer_idx = layer_idx
140
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
141
+
142
+ if self.is_cross_attention:
143
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
144
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
145
+ else:
146
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
147
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
148
+
149
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
150
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
151
+
152
+ self.pruned_heads = set()
153
+
154
+ def prune_heads(self, heads):
155
+ if len(heads) == 0:
156
+ return
157
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
158
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
159
+
160
+ # Prune conv1d layers
161
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
162
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
163
+
164
+ # Update hyper params
165
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
166
+ self.num_heads = self.num_heads - len(heads)
167
+ self.pruned_heads = self.pruned_heads.union(heads)
168
+
169
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
170
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
171
+
172
+ if self.scale_attn_weights:
173
+ attn_weights = attn_weights / torch.full(
174
+ [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
175
+ )
176
+
177
+ # Layer-wise attention scaling
178
+ if self.scale_attn_by_inverse_layer_idx:
179
+ attn_weights = attn_weights / float(self.layer_idx + 1)
180
+
181
+ if not self.is_cross_attention:
182
+ # if only "normal" attention layer implements causal mask
183
+ query_length, key_length = query.size(-2), key.size(-2)
184
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
185
+ mask_value = torch.finfo(attn_weights.dtype).min
186
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
187
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
188
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
189
+ attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
190
+
191
+ if attention_mask is not None:
192
+ # Apply the attention mask
193
+ attn_weights = attn_weights + attention_mask
194
+
195
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
196
+
197
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
198
+ attn_weights = attn_weights.type(value.dtype)
199
+ attn_weights = self.attn_dropout(attn_weights)
200
+
201
+ # Mask heads if we want to
202
+ if head_mask is not None:
203
+ attn_weights = attn_weights * head_mask
204
+
205
+ attn_output = torch.matmul(attn_weights, value)
206
+
207
+ return attn_output, attn_weights
208
+
209
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
210
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
211
+ bsz, num_heads, q_seq_len, dk = query.size()
212
+ _, _, k_seq_len, _ = key.size()
213
+
214
+ # Preallocate attn_weights for `baddbmm`
215
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
216
+
217
+ # Compute Scale Factor
218
+ scale_factor = 1.0
219
+ if self.scale_attn_weights:
220
+ scale_factor /= float(value.size(-1)) ** 0.5
221
+
222
+ if self.scale_attn_by_inverse_layer_idx:
223
+ scale_factor /= float(self.layer_idx + 1)
224
+
225
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
226
+ with autocast(enabled=False):
227
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
228
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
229
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
230
+
231
+ if not self.is_cross_attention:
232
+ # if only "normal" attention layer implements causal mask
233
+ query_length, key_length = query.size(-2), key.size(-2)
234
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
235
+ mask_value = torch.finfo(attn_weights.dtype).min
236
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
237
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
238
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
239
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
240
+
241
+ if attention_mask is not None:
242
+ # Apply the attention mask
243
+ attn_weights = attn_weights + attention_mask
244
+
245
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
246
+
247
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
248
+ if attn_weights.dtype != torch.float32:
249
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
250
+ attn_weights = attn_weights.type(value.dtype)
251
+ attn_weights = self.attn_dropout(attn_weights)
252
+
253
+ # Mask heads if we want to
254
+ if head_mask is not None:
255
+ attn_weights = attn_weights * head_mask
256
+
257
+ attn_output = torch.matmul(attn_weights, value)
258
+
259
+ return attn_output, attn_weights
260
+
261
+ def _split_heads(self, tensor, num_heads, attn_head_size):
262
+ """
263
+ Splits hidden_size dim into attn_head_size and num_heads
264
+ """
265
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
266
+ tensor = tensor.view(new_shape)
267
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
268
+
269
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
270
+ """
271
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
272
+ """
273
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
274
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
275
+ return tensor.view(new_shape)
276
+
277
+ def forward(
278
+ self,
279
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
280
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
281
+ attention_mask: Optional[torch.FloatTensor] = None,
282
+ head_mask: Optional[torch.FloatTensor] = None,
283
+ encoder_hidden_states: Optional[torch.Tensor] = None,
284
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
285
+ use_cache: Optional[bool] = False,
286
+ output_attentions: Optional[bool] = False,
287
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
288
+ if encoder_hidden_states is not None:
289
+ if not hasattr(self, "q_attn"):
290
+ raise ValueError(
291
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
292
+ "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
293
+ )
294
+
295
+ query = self.q_attn(hidden_states)
296
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
297
+ attention_mask = encoder_attention_mask
298
+ else:
299
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
300
+
301
+ query = self._split_heads(query, self.num_heads, self.head_dim)
302
+ key = self._split_heads(key, self.num_heads, self.head_dim)
303
+ value = self._split_heads(value, self.num_heads, self.head_dim)
304
+
305
+ if layer_past is not None:
306
+ past_key, past_value = layer_past
307
+ key = torch.cat((past_key, key), dim=-2)
308
+ value = torch.cat((past_value, value), dim=-2)
309
+
310
+ if use_cache is True:
311
+ present = (key, value)
312
+ else:
313
+ present = None
314
+
315
+ if self.reorder_and_upcast_attn:
316
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
317
+ else:
318
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
319
+
320
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
321
+ attn_output = self.c_proj(attn_output)
322
+ attn_output = self.resid_dropout(attn_output)
323
+
324
+ outputs = (attn_output, present)
325
+ if output_attentions:
326
+ outputs += (attn_weights,)
327
+
328
+ return outputs # a, present, (attentions)
329
+
330
+
331
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
332
+ class DecisionTransformerGPT2MLP(nn.Module):
333
+ def __init__(self, intermediate_size, config):
334
+ super().__init__()
335
+ embed_dim = config.hidden_size
336
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
337
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
338
+ self.act = ACT2FN[config.activation_function]
339
+ self.dropout = nn.Dropout(config.resid_pdrop)
340
+
341
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
342
+ hidden_states = self.c_fc(hidden_states)
343
+ hidden_states = self.act(hidden_states)
344
+ hidden_states = self.c_proj(hidden_states)
345
+ hidden_states = self.dropout(hidden_states)
346
+ return hidden_states
347
+
348
+
349
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
350
+ class DecisionTransformerGPT2Block(nn.Module):
351
+ def __init__(self, config, layer_idx=None):
352
+ super().__init__()
353
+ hidden_size = config.hidden_size
354
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
355
+
356
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
357
+ self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
358
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
359
+
360
+ if config.add_cross_attention:
361
+ self.crossattention = DecisionTransformerGPT2Attention(
362
+ config, is_cross_attention=True, layer_idx=layer_idx
363
+ )
364
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
365
+
366
+ self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
367
+
368
+ def forward(
369
+ self,
370
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
371
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
372
+ attention_mask: Optional[torch.FloatTensor] = None,
373
+ head_mask: Optional[torch.FloatTensor] = None,
374
+ encoder_hidden_states: Optional[torch.Tensor] = None,
375
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
376
+ use_cache: Optional[bool] = False,
377
+ output_attentions: Optional[bool] = False,
378
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
379
+ residual = hidden_states
380
+ hidden_states = self.ln_1(hidden_states)
381
+ attn_outputs = self.attn(
382
+ hidden_states,
383
+ layer_past=layer_past,
384
+ attention_mask=attention_mask,
385
+ head_mask=head_mask,
386
+ use_cache=use_cache,
387
+ output_attentions=output_attentions,
388
+ )
389
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
390
+ outputs = attn_outputs[1:]
391
+ # residual connection
392
+ hidden_states = attn_output + residual
393
+
394
+ if encoder_hidden_states is not None:
395
+ # add one self-attention block for cross-attention
396
+ if not hasattr(self, "crossattention"):
397
+ raise ValueError(
398
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
399
+ "cross-attention layers by setting `config.add_cross_attention=True`"
400
+ )
401
+ residual = hidden_states
402
+ hidden_states = self.ln_cross_attn(hidden_states)
403
+ cross_attn_outputs = self.crossattention(
404
+ hidden_states,
405
+ attention_mask=attention_mask,
406
+ head_mask=head_mask,
407
+ encoder_hidden_states=encoder_hidden_states,
408
+ encoder_attention_mask=encoder_attention_mask,
409
+ output_attentions=output_attentions,
410
+ )
411
+ attn_output = cross_attn_outputs[0]
412
+ # residual connection
413
+ hidden_states = residual + attn_output
414
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
415
+
416
+ residual = hidden_states
417
+ hidden_states = self.ln_2(hidden_states)
418
+ feed_forward_hidden_states = self.mlp(hidden_states)
419
+ # residual connection
420
+ hidden_states = residual + feed_forward_hidden_states
421
+
422
+ if use_cache:
423
+ outputs = (hidden_states,) + outputs
424
+ else:
425
+ outputs = (hidden_states,) + outputs[1:]
426
+
427
+ return outputs # hidden_states, present, (attentions, cross_attentions)
428
+
429
+
430
+ class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
431
+ """
432
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
433
+ models.
434
+ """
435
+
436
+ config_class = DecisionTransformerConfig
437
+ load_tf_weights = load_tf_weights_in_gpt2
438
+ base_model_prefix = "transformer"
439
+ is_parallelizable = True
440
+ supports_gradient_checkpointing = True
441
+
442
+ def __init__(self, *inputs, **kwargs):
443
+ super().__init__(*inputs, **kwargs)
444
+
445
+ def _init_weights(self, module):
446
+ """Initialize the weights."""
447
+ if isinstance(module, (nn.Linear, Conv1D)):
448
+ # Slightly different from the TF version which uses truncated_normal for initialization
449
+ # cf https://github.com/pytorch/pytorch/pull/5617
450
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
451
+ if module.bias is not None:
452
+ module.bias.data.zero_()
453
+ elif isinstance(module, nn.Embedding):
454
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
455
+ if module.padding_idx is not None:
456
+ module.weight.data[module.padding_idx].zero_()
457
+ elif isinstance(module, nn.LayerNorm):
458
+ module.bias.data.zero_()
459
+ module.weight.data.fill_(1.0)
460
+
461
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
462
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
463
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
464
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
465
+ #
466
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
467
+ for name, p in module.named_parameters():
468
+ if "c_proj" in name and "weight" in name:
469
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
470
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
471
+
472
+
473
+ class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
474
+ def __init__(self, config):
475
+ super().__init__(config)
476
+
477
+ self.embed_dim = config.hidden_size
478
+
479
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
480
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
481
+
482
+ self.drop = nn.Dropout(config.embd_pdrop)
483
+ self.h = nn.ModuleList(
484
+ [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
485
+ )
486
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
487
+
488
+ # Model parallel
489
+ self.model_parallel = False
490
+ self.device_map = None
491
+ self.gradient_checkpointing = False
492
+
493
+ # Initialize weights and apply final processing
494
+ self.post_init()
495
+
496
+ def get_input_embeddings(self):
497
+ return self.wte
498
+
499
+ def set_input_embeddings(self, new_embeddings):
500
+ self.wte = new_embeddings
501
+
502
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward
503
+ def forward(
504
+ self,
505
+ input_ids: Optional[torch.LongTensor] = None,
506
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
507
+ attention_mask: Optional[torch.FloatTensor] = None,
508
+ token_type_ids: Optional[torch.LongTensor] = None,
509
+ position_ids: Optional[torch.LongTensor] = None,
510
+ head_mask: Optional[torch.FloatTensor] = None,
511
+ inputs_embeds: Optional[torch.FloatTensor] = None,
512
+ encoder_hidden_states: Optional[torch.Tensor] = None,
513
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
514
+ use_cache: Optional[bool] = None,
515
+ output_attentions: Optional[bool] = None,
516
+ output_hidden_states: Optional[bool] = None,
517
+ return_dict: Optional[bool] = None,
518
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
519
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
520
+ output_hidden_states = (
521
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
522
+ )
523
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
524
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
525
+
526
+ if input_ids is not None and inputs_embeds is not None:
527
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
528
+ elif input_ids is not None:
529
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
530
+ input_shape = input_ids.size()
531
+ input_ids = input_ids.view(-1, input_shape[-1])
532
+ batch_size = input_ids.shape[0]
533
+ elif inputs_embeds is not None:
534
+ input_shape = inputs_embeds.size()[:-1]
535
+ batch_size = inputs_embeds.shape[0]
536
+ else:
537
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
538
+
539
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
540
+
541
+ if token_type_ids is not None:
542
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
543
+
544
+ if past_key_values is None:
545
+ past_length = 0
546
+ past_key_values = tuple([None] * len(self.h))
547
+ else:
548
+ past_length = past_key_values[0][0].size(-2)
549
+ if position_ids is None:
550
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
551
+ position_ids = position_ids.unsqueeze(0)
552
+
553
+ # GPT2Attention mask.
554
+ if attention_mask is not None:
555
+ if batch_size <= 0:
556
+ raise ValueError("batch_size has to be defined and > 0")
557
+ attention_mask = attention_mask.view(batch_size, -1)
558
+ # We create a 3D attention mask from a 2D tensor mask.
559
+ # Sizes are [batch_size, 1, 1, to_seq_length]
560
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
561
+ # this attention mask is simpler than the triangular masking of causal attention
562
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
563
+ attention_mask = attention_mask[:, None, None, :]
564
+
565
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
566
+ # masked positions, this operation will create a tensor which is 0.0 for
567
+ # positions we want to attend and the dtype's smallest value for masked positions.
568
+ # Since we are adding it to the raw scores before the softmax, this is
569
+ # effectively the same as removing these entirely.
570
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
571
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
572
+
573
+ # If a 2D or 3D attention mask is provided for the cross-attention
574
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
575
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
576
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
577
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
578
+ if encoder_attention_mask is None:
579
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
580
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
581
+ else:
582
+ encoder_attention_mask = None
583
+
584
+ # Prepare head mask if needed
585
+ # 1.0 in head_mask indicate we keep the head
586
+ # attention_probs has shape bsz x n_heads x N x N
587
+ # head_mask has shape n_layer x batch x n_heads x N x N
588
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
589
+
590
+ if inputs_embeds is None:
591
+ inputs_embeds = self.wte(input_ids)
592
+ position_embeds = self.wpe(position_ids)
593
+ hidden_states = inputs_embeds + position_embeds
594
+
595
+ if token_type_ids is not None:
596
+ token_type_embeds = self.wte(token_type_ids)
597
+ hidden_states = hidden_states + token_type_embeds
598
+
599
+ hidden_states = self.drop(hidden_states)
600
+
601
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
602
+
603
+ if self.gradient_checkpointing and self.training:
604
+ if use_cache:
605
+ logger.warning_once(
606
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
607
+ )
608
+ use_cache = False
609
+
610
+ presents = () if use_cache else None
611
+ all_self_attentions = () if output_attentions else None
612
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
613
+ all_hidden_states = () if output_hidden_states else None
614
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
615
+ # Model parallel
616
+ if self.model_parallel:
617
+ torch.cuda.set_device(hidden_states.device)
618
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
619
+ if layer_past is not None:
620
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
621
+ # Ensure that attention_mask is always on the same device as hidden_states
622
+ if attention_mask is not None:
623
+ attention_mask = attention_mask.to(hidden_states.device)
624
+ if isinstance(head_mask, torch.Tensor):
625
+ head_mask = head_mask.to(hidden_states.device)
626
+ if output_hidden_states:
627
+ all_hidden_states = all_hidden_states + (hidden_states,)
628
+
629
+ if self.gradient_checkpointing and self.training:
630
+ outputs = self._gradient_checkpointing_func(
631
+ block.__call__,
632
+ hidden_states,
633
+ None,
634
+ attention_mask,
635
+ head_mask[i],
636
+ encoder_hidden_states,
637
+ encoder_attention_mask,
638
+ use_cache,
639
+ output_attentions,
640
+ )
641
+ else:
642
+ outputs = block(
643
+ hidden_states,
644
+ layer_past=layer_past,
645
+ attention_mask=attention_mask,
646
+ head_mask=head_mask[i],
647
+ encoder_hidden_states=encoder_hidden_states,
648
+ encoder_attention_mask=encoder_attention_mask,
649
+ use_cache=use_cache,
650
+ output_attentions=output_attentions,
651
+ )
652
+
653
+ hidden_states = outputs[0]
654
+ if use_cache is True:
655
+ presents = presents + (outputs[1],)
656
+
657
+ if output_attentions:
658
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
659
+ if self.config.add_cross_attention:
660
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
661
+
662
+ # Model Parallel: If it's the last layer for that device, put things on the next device
663
+ if self.model_parallel:
664
+ for k, v in self.device_map.items():
665
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
666
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
667
+
668
+ hidden_states = self.ln_f(hidden_states)
669
+
670
+ hidden_states = hidden_states.view(output_shape)
671
+ # Add last hidden state
672
+ if output_hidden_states:
673
+ all_hidden_states = all_hidden_states + (hidden_states,)
674
+
675
+ if not return_dict:
676
+ return tuple(
677
+ v
678
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
679
+ if v is not None
680
+ )
681
+
682
+ return BaseModelOutputWithPastAndCrossAttentions(
683
+ last_hidden_state=hidden_states,
684
+ past_key_values=presents,
685
+ hidden_states=all_hidden_states,
686
+ attentions=all_self_attentions,
687
+ cross_attentions=all_cross_attentions,
688
+ )
689
+
690
+
691
+ @dataclass
692
+ class DecisionTransformerOutput(ModelOutput):
693
+ """
694
+ Base class for model's outputs that also contains a pooling of the last hidden states.
695
+
696
+ Args:
697
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
698
+ Sequence of hidden-states at the output of the last layer of the model.
699
+ state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
700
+ Environment state predictions
701
+ action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
702
+ Model action predictions
703
+ return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
704
+ Predicted returns for each state
705
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
706
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
707
+ shape `(batch_size, sequence_length, hidden_size)`.
708
+
709
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
710
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
711
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
712
+ sequence_length)`.
713
+
714
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
715
+ heads.
716
+ """
717
+
718
+ state_preds: torch.FloatTensor = None
719
+ action_preds: torch.FloatTensor = None
720
+ return_preds: torch.FloatTensor = None
721
+ hidden_states: torch.FloatTensor = None
722
+ attentions: torch.FloatTensor = None
723
+ last_hidden_state: torch.FloatTensor = None
724
+
725
+
726
+ class DecisionTransformerPreTrainedModel(PreTrainedModel):
727
+ """
728
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
729
+ models.
730
+ """
731
+
732
+ config_class = DecisionTransformerConfig
733
+ base_model_prefix = "decision_transformer"
734
+ main_input_name = "states"
735
+ supports_gradient_checkpointing = False
736
+
737
+ def _init_weights(self, module):
738
+ """Initialize the weights"""
739
+ if isinstance(module, nn.Linear):
740
+ # Slightly different from the TF version which uses truncated_normal for initialization
741
+ # cf https://github.com/pytorch/pytorch/pull/5617
742
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
743
+ if module.bias is not None:
744
+ module.bias.data.zero_()
745
+ elif isinstance(module, nn.Embedding):
746
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
747
+ if module.padding_idx is not None:
748
+ module.weight.data[module.padding_idx].zero_()
749
+ elif isinstance(module, nn.LayerNorm):
750
+ module.bias.data.zero_()
751
+ module.weight.data.fill_(1.0)
752
+
753
+
754
+ DECISION_TRANSFORMER_START_DOCSTRING = r"""
755
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
756
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
757
+ behavior.
758
+
759
+ Parameters:
760
+ config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
761
+ Initializing with a config file does not load the weights associated with the model, only the
762
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
763
+ """
764
+
765
+ DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
766
+ Args:
767
+ states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
768
+ The states for each step in the trajectory
769
+ actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
770
+ The actions taken by the "expert" policy for the current state; these are masked for autoregressive
771
+ prediction
772
+ rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
773
+ The rewards for each state, action
774
+ returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
775
+ The returns for each state in the trajectory
776
+ timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
777
+ The timestep for each step in the trajectory
778
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):
779
+ Masking, used to mask the actions when performing autoregressive prediction
780
+ """
781
+
782
+
783
+ @add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
784
+ class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
785
+ """
786
+
787
+ The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
788
+ setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
789
+
790
+ """
791
+
792
+ def __init__(self, config):
793
+ super().__init__(config)
794
+ self.config = config
795
+ self.hidden_size = config.hidden_size
796
+ # note: the only difference between this GPT2Model and the default Huggingface version
797
+ # is that the positional embeddings are removed (since we'll add those ourselves)
798
+ self.encoder = DecisionTransformerGPT2Model(config)
799
+
800
+ self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
801
+ self.embed_return = torch.nn.Linear(1, config.hidden_size)
802
+ self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
803
+ self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
804
+
805
+ self.embed_ln = nn.LayerNorm(config.hidden_size)
806
+
807
+ # note: we don't predict states or returns for the paper
808
+ self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
809
+ self.predict_action = nn.Sequential(
810
+ *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
811
+ )
812
+ self.predict_return = torch.nn.Linear(config.hidden_size, 1)
813
+
814
+ # Initialize weights and apply final processing
815
+ self.post_init()
816
+
817
+ @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
818
+ @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
819
+ def forward(
820
+ self,
821
+ states: Optional[torch.FloatTensor] = None,
822
+ actions: Optional[torch.FloatTensor] = None,
823
+ rewards: Optional[torch.FloatTensor] = None,
824
+ returns_to_go: Optional[torch.FloatTensor] = None,
825
+ timesteps: Optional[torch.LongTensor] = None,
826
+ attention_mask: Optional[torch.FloatTensor] = None,
827
+ output_hidden_states: Optional[bool] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ return_dict: Optional[bool] = None,
830
+ ) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
831
+ r"""
832
+ Returns:
833
+
834
+ Examples:
835
+
836
+ ```python
837
+ >>> from transformers import DecisionTransformerModel
838
+ >>> import gym
+ >>> import torch
839
+
840
+ >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
841
+ >>> # evaluation
842
+ >>> device = "cpu"  # assumed; set to "cuda" if a GPU is available
+ >>> model = model.to(device)
843
+ >>> model.eval()
844
+
845
+ >>> env = gym.make("Hopper-v3")
846
+ >>> state_dim = env.observation_space.shape[0]
847
+ >>> act_dim = env.action_space.shape[0]
848
+
849
+ >>> state = env.reset()
850
+ >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
851
+ >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
852
+ >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
853
+ >>> TARGET_RETURN = 3600  # placeholder return-to-go target for illustration
+ >>> target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1)
854
+ >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
855
+ >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)
856
+
857
+ >>> # forward pass
858
+ >>> with torch.no_grad():
859
+ ... state_preds, action_preds, return_preds = model(
860
+ ... states=states,
861
+ ... actions=actions,
862
+ ... rewards=rewards,
863
+ ... returns_to_go=target_return,
864
+ ... timesteps=timesteps,
865
+ ... attention_mask=attention_mask,
866
+ ... return_dict=False,
867
+ ... )
868
+ ```"""
869
+
870
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
871
+ output_hidden_states = (
872
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
873
+ )
874
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
875
+
876
+ batch_size, seq_length = states.shape[0], states.shape[1]
877
+
878
+ if attention_mask is None:
879
+ # attention mask for GPT: 1 if can be attended to, 0 if not
880
+ attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
881
+
882
+ # embed each modality with a different head
883
+ state_embeddings = self.embed_state(states)
884
+ action_embeddings = self.embed_action(actions)
885
+ returns_embeddings = self.embed_return(returns_to_go)
886
+ time_embeddings = self.embed_timestep(timesteps)
887
+
888
+ # time embeddings are treated similar to positional embeddings
889
+ state_embeddings = state_embeddings + time_embeddings
890
+ action_embeddings = action_embeddings + time_embeddings
891
+ returns_embeddings = returns_embeddings + time_embeddings
892
+
893
+ # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
894
+ # which works nicely in an autoregressive sense since states predict actions
895
+ stacked_inputs = (
896
+ torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
897
+ .permute(0, 2, 1, 3)
898
+ .reshape(batch_size, 3 * seq_length, self.hidden_size)
899
+ )
900
+ stacked_inputs = self.embed_ln(stacked_inputs)
901
+
902
+ # to make the attention mask fit the stacked inputs, have to stack it as well
903
+ stacked_attention_mask = (
904
+ torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
905
+ .permute(0, 2, 1)
906
+ .reshape(batch_size, 3 * seq_length)
907
+ )
908
+ device = stacked_inputs.device
909
+ # we feed in the input embeddings (not word indices as in NLP) to the model
910
+ encoder_outputs = self.encoder(
911
+ inputs_embeds=stacked_inputs,
912
+ attention_mask=stacked_attention_mask,
913
+ position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
914
+ output_attentions=output_attentions,
915
+ output_hidden_states=output_hidden_states,
916
+ return_dict=return_dict,
917
+ )
918
+ x = encoder_outputs[0]
919
+
920
+ # reshape x so that the second dimension corresponds to the original
921
+ # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
922
+ x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
923
+
924
+ # get predictions
925
+ return_preds = self.predict_return(x[:, 2]) # predict next return given state and action
926
+ state_preds = self.predict_state(x[:, 2]) # predict next state given state and action
927
+ action_preds = self.predict_action(x[:, 1]) # predict next action given state
928
+ if not return_dict:
929
+ return (state_preds, action_preds, return_preds)
930
+
931
+ return DecisionTransformerOutput(
932
+ last_hidden_state=encoder_outputs.last_hidden_state,
933
+ state_preds=state_preds,
934
+ action_preds=action_preds,
935
+ return_preds=return_preds,
936
+ hidden_states=encoder_outputs.hidden_states,
937
+ attentions=encoder_outputs.attentions,
938
+ )
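The forward pass above interleaves return, state and action embeddings into one sequence of length `3 * seq_length`, runs it through the GPT-2 encoder, and then folds the output back so each modality can be read off separately. Below is a minimal, hedged sketch of just that stacking/unstacking step with random tensors; the sizes are illustrative assumptions, not values taken from the committed file.

```python
import torch

# Illustrative sizes only (assumptions, not taken from the committed file).
batch_size, seq_length, hidden_size = 2, 4, 8

returns_embeddings = torch.randn(batch_size, seq_length, hidden_size)
state_embeddings = torch.randn(batch_size, seq_length, hidden_size)
action_embeddings = torch.randn(batch_size, seq_length, hidden_size)

# Interleave per timestep as (R_1, s_1, a_1, R_2, s_2, a_2, ...), as in the forward pass.
stacked_inputs = (
    torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
    .permute(0, 2, 1, 3)
    .reshape(batch_size, 3 * seq_length, hidden_size)
)

# After the encoder, the sequence is folded back so dim 1 indexes the modality:
# x[:, 0] -> return tokens, x[:, 1] -> state tokens, x[:, 2] -> action tokens.
x = stacked_inputs.reshape(batch_size, seq_length, 3, hidden_size).permute(0, 2, 1, 3)
assert torch.allclose(x[:, 1, 0], state_embeddings[:, 0])  # s_1 is recovered at x[:, 1, 0]
```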
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__init__.py ADDED
@@ -0,0 +1,168 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
29
+ "tokenization_electra": ["ElectraTokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
39
+
40
+ try:
41
+ if not is_torch_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_electra"] = [
47
+ "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "ElectraForCausalLM",
49
+ "ElectraForMaskedLM",
50
+ "ElectraForMultipleChoice",
51
+ "ElectraForPreTraining",
52
+ "ElectraForQuestionAnswering",
53
+ "ElectraForSequenceClassification",
54
+ "ElectraForTokenClassification",
55
+ "ElectraModel",
56
+ "ElectraPreTrainedModel",
57
+ "load_tf_weights_in_electra",
58
+ ]
59
+
60
+ try:
61
+ if not is_tf_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_tf_electra"] = [
67
+ "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
68
+ "TFElectraForMaskedLM",
69
+ "TFElectraForMultipleChoice",
70
+ "TFElectraForPreTraining",
71
+ "TFElectraForQuestionAnswering",
72
+ "TFElectraForSequenceClassification",
73
+ "TFElectraForTokenClassification",
74
+ "TFElectraModel",
75
+ "TFElectraPreTrainedModel",
76
+ ]
77
+
78
+ try:
79
+ if not is_flax_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ _import_structure["modeling_flax_electra"] = [
85
+ "FlaxElectraForCausalLM",
86
+ "FlaxElectraForMaskedLM",
87
+ "FlaxElectraForMultipleChoice",
88
+ "FlaxElectraForPreTraining",
89
+ "FlaxElectraForQuestionAnswering",
90
+ "FlaxElectraForSequenceClassification",
91
+ "FlaxElectraForTokenClassification",
92
+ "FlaxElectraModel",
93
+ "FlaxElectraPreTrainedModel",
94
+ ]
95
+
96
+
97
+ if TYPE_CHECKING:
98
+ from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
99
+ from .tokenization_electra import ElectraTokenizer
100
+
101
+ try:
102
+ if not is_tokenizers_available():
103
+ raise OptionalDependencyNotAvailable()
104
+ except OptionalDependencyNotAvailable:
105
+ pass
106
+ else:
107
+ from .tokenization_electra_fast import ElectraTokenizerFast
108
+
109
+ try:
110
+ if not is_torch_available():
111
+ raise OptionalDependencyNotAvailable()
112
+ except OptionalDependencyNotAvailable:
113
+ pass
114
+ else:
115
+ from .modeling_electra import (
116
+ ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
117
+ ElectraForCausalLM,
118
+ ElectraForMaskedLM,
119
+ ElectraForMultipleChoice,
120
+ ElectraForPreTraining,
121
+ ElectraForQuestionAnswering,
122
+ ElectraForSequenceClassification,
123
+ ElectraForTokenClassification,
124
+ ElectraModel,
125
+ ElectraPreTrainedModel,
126
+ load_tf_weights_in_electra,
127
+ )
128
+
129
+ try:
130
+ if not is_tf_available():
131
+ raise OptionalDependencyNotAvailable()
132
+ except OptionalDependencyNotAvailable:
133
+ pass
134
+ else:
135
+ from .modeling_tf_electra import (
136
+ TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
137
+ TFElectraForMaskedLM,
138
+ TFElectraForMultipleChoice,
139
+ TFElectraForPreTraining,
140
+ TFElectraForQuestionAnswering,
141
+ TFElectraForSequenceClassification,
142
+ TFElectraForTokenClassification,
143
+ TFElectraModel,
144
+ TFElectraPreTrainedModel,
145
+ )
146
+
147
+ try:
148
+ if not is_flax_available():
149
+ raise OptionalDependencyNotAvailable()
150
+ except OptionalDependencyNotAvailable:
151
+ pass
152
+ else:
153
+ from .modeling_flax_electra import (
154
+ FlaxElectraForCausalLM,
155
+ FlaxElectraForMaskedLM,
156
+ FlaxElectraForMultipleChoice,
157
+ FlaxElectraForPreTraining,
158
+ FlaxElectraForQuestionAnswering,
159
+ FlaxElectraForSequenceClassification,
160
+ FlaxElectraForTokenClassification,
161
+ FlaxElectraModel,
162
+ FlaxElectraPreTrainedModel,
163
+ )
164
+
165
+ else:
166
+ import sys
167
+
168
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
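The `__init__` above only declares `_import_structure`; the framework-specific modules are imported lazily on first attribute access via `_LazyModule`. A small sketch of what that means in practice (assuming `transformers` with PyTorch support is installed; this example is not part of the committed file):

```python
# Importing the package does not yet import the heavy modeling code.
import transformers.models.electra as electra

config = electra.ElectraConfig()   # resolved from configuration_electra
model_cls = electra.ElectraModel   # first access triggers the lazy import of modeling_electra
print(model_cls.__module__)        # transformers.models.electra.modeling_electra
```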
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.54 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/configuration_electra.cpython-310.pyc ADDED
Binary file (8.92 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/convert_electra_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.86 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_electra.cpython-310.pyc ADDED
Binary file (49.2 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_flax_electra.cpython-310.pyc ADDED
Binary file (40.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/modeling_tf_electra.cpython-310.pyc ADDED
Binary file (51.7 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra.cpython-310.pyc ADDED
Binary file (18 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/__pycache__/tokenization_electra_fast.cpython-310.pyc ADDED
Binary file (8.25 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/configuration_electra.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ ELECTRA model configuration"""
17
+
18
+ from collections import OrderedDict
19
+ from typing import Mapping
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
29
+ "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json",
30
+ "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json",
31
+ "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json",
32
+ "google/electra-small-discriminator": (
33
+ "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json"
34
+ ),
35
+ "google/electra-base-discriminator": (
36
+ "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json"
37
+ ),
38
+ "google/electra-large-discriminator": (
39
+ "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json"
40
+ ),
41
+ }
42
+
43
+
44
+ class ElectraConfig(PretrainedConfig):
45
+ r"""
46
+ This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
47
+ used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture.
48
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
49
+ [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.
50
+
51
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
52
+ documentation from [`PretrainedConfig`] for more information.
53
+
54
+
55
+ Args:
56
+ vocab_size (`int`, *optional*, defaults to 30522):
57
+ Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
58
+ `inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
59
+ embedding_size (`int`, *optional*, defaults to 128):
60
+ Dimensionality of the encoder layers and the pooler layer.
61
+ hidden_size (`int`, *optional*, defaults to 256):
62
+ Dimensionality of the encoder layers and the pooler layer.
63
+ num_hidden_layers (`int`, *optional*, defaults to 12):
64
+ Number of hidden layers in the Transformer encoder.
65
+ num_attention_heads (`int`, *optional*, defaults to 4):
66
+ Number of attention heads for each attention layer in the Transformer encoder.
67
+ intermediate_size (`int`, *optional*, defaults to 1024):
68
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
69
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
70
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
71
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
72
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
73
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
74
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
75
+ The dropout ratio for the attention probabilities.
76
+ max_position_embeddings (`int`, *optional*, defaults to 512):
77
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
78
+ just in case (e.g., 512 or 1024 or 2048).
79
+ type_vocab_size (`int`, *optional*, defaults to 2):
80
+ The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
81
+ initializer_range (`float`, *optional*, defaults to 0.02):
82
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
83
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
84
+ The epsilon used by the layer normalization layers.
85
+ summary_type (`str`, *optional*, defaults to `"first"`):
86
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
87
+
88
+ Has to be one of the following options:
89
+
90
+ - `"last"`: Take the last token hidden state (like XLNet).
91
+ - `"first"`: Take the first token hidden state (like BERT).
92
+ - `"mean"`: Take the mean of all tokens hidden states.
93
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
94
+ - `"attn"`: Not implemented now, use multi-head attention.
95
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
96
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
97
+
98
+ Whether or not to add a projection after the vector extraction.
99
+ summary_activation (`str`, *optional*):
100
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
101
+
102
+ Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
103
+ summary_last_dropout (`float`, *optional*, defaults to 0.0):
104
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
105
+
106
+ The dropout ratio to be used after the projection and activation.
107
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
108
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
109
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
110
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
111
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
112
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
113
+ use_cache (`bool`, *optional*, defaults to `True`):
114
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
115
+ relevant if `config.is_decoder=True`.
116
+ classifier_dropout (`float`, *optional*):
117
+ The dropout ratio for the classification head.
118
+
119
+ Examples:
120
+
121
+ ```python
122
+ >>> from transformers import ElectraConfig, ElectraModel
123
+
124
+ >>> # Initializing a ELECTRA electra-base-uncased style configuration
125
+ >>> configuration = ElectraConfig()
126
+
127
+ >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
128
+ >>> model = ElectraModel(configuration)
129
+
130
+ >>> # Accessing the model configuration
131
+ >>> configuration = model.config
132
+ ```"""
133
+
134
+ model_type = "electra"
135
+
136
+ def __init__(
137
+ self,
138
+ vocab_size=30522,
139
+ embedding_size=128,
140
+ hidden_size=256,
141
+ num_hidden_layers=12,
142
+ num_attention_heads=4,
143
+ intermediate_size=1024,
144
+ hidden_act="gelu",
145
+ hidden_dropout_prob=0.1,
146
+ attention_probs_dropout_prob=0.1,
147
+ max_position_embeddings=512,
148
+ type_vocab_size=2,
149
+ initializer_range=0.02,
150
+ layer_norm_eps=1e-12,
151
+ summary_type="first",
152
+ summary_use_proj=True,
153
+ summary_activation="gelu",
154
+ summary_last_dropout=0.1,
155
+ pad_token_id=0,
156
+ position_embedding_type="absolute",
157
+ use_cache=True,
158
+ classifier_dropout=None,
159
+ **kwargs,
160
+ ):
161
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
162
+
163
+ self.vocab_size = vocab_size
164
+ self.embedding_size = embedding_size
165
+ self.hidden_size = hidden_size
166
+ self.num_hidden_layers = num_hidden_layers
167
+ self.num_attention_heads = num_attention_heads
168
+ self.intermediate_size = intermediate_size
169
+ self.hidden_act = hidden_act
170
+ self.hidden_dropout_prob = hidden_dropout_prob
171
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
172
+ self.max_position_embeddings = max_position_embeddings
173
+ self.type_vocab_size = type_vocab_size
174
+ self.initializer_range = initializer_range
175
+ self.layer_norm_eps = layer_norm_eps
176
+
177
+ self.summary_type = summary_type
178
+ self.summary_use_proj = summary_use_proj
179
+ self.summary_activation = summary_activation
180
+ self.summary_last_dropout = summary_last_dropout
181
+ self.position_embedding_type = position_embedding_type
182
+ self.use_cache = use_cache
183
+ self.classifier_dropout = classifier_dropout
184
+
185
+
186
+ class ElectraOnnxConfig(OnnxConfig):
187
+ @property
188
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
189
+ if self.task == "multiple-choice":
190
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
191
+ else:
192
+ dynamic_axis = {0: "batch", 1: "sequence"}
193
+ return OrderedDict(
194
+ [
195
+ ("input_ids", dynamic_axis),
196
+ ("attention_mask", dynamic_axis),
197
+ ("token_type_ids", dynamic_axis),
198
+ ]
199
+ )
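As a reference for the `ElectraOnnxConfig.inputs` property defined above, this is roughly what it resolves to for the default task (a hedged sketch assuming `transformers` is installed; not part of the committed file):

```python
from transformers import ElectraConfig
from transformers.models.electra.configuration_electra import ElectraOnnxConfig

onnx_config = ElectraOnnxConfig(ElectraConfig())  # task defaults to "default"
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
```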
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,80 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ELECTRA checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
23
+ from transformers.utils import logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator):
30
+ # Initialise PyTorch model
31
+ config = ElectraConfig.from_json_file(config_file)
32
+ print(f"Building PyTorch model from configuration: {config}")
33
+
34
+ if discriminator_or_generator == "discriminator":
35
+ model = ElectraForPreTraining(config)
36
+ elif discriminator_or_generator == "generator":
37
+ model = ElectraForMaskedLM(config)
38
+ else:
39
+ raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'")
40
+
41
+ # Load weights from tf checkpoint
42
+ load_tf_weights_in_electra(
43
+ model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator
44
+ )
45
+
46
+ # Save pytorch-model
47
+ print(f"Save PyTorch model to {pytorch_dump_path}")
48
+ torch.save(model.state_dict(), pytorch_dump_path)
49
+
50
+
51
+ if __name__ == "__main__":
52
+ parser = argparse.ArgumentParser()
53
+ # Required parameters
54
+ parser.add_argument(
55
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
56
+ )
57
+ parser.add_argument(
58
+ "--config_file",
59
+ default=None,
60
+ type=str,
61
+ required=True,
62
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
63
+ )
64
+ parser.add_argument(
65
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
66
+ )
67
+ parser.add_argument(
68
+ "--discriminator_or_generator",
69
+ default=None,
70
+ type=str,
71
+ required=True,
72
+ help=(
73
+ "Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or "
74
+ "'generator'."
75
+ ),
76
+ )
77
+ args = parser.parse_args()
78
+ convert_tf_checkpoint_to_pytorch(
79
+ args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator
80
+ )
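Besides the CLI entry point above, the converter can be driven directly from Python by calling `convert_tf_checkpoint_to_pytorch`; the paths below are placeholders for illustration, not files shipped with this package:

```python
from transformers.models.electra.convert_electra_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/electra_small/model.ckpt",  # placeholder path
    config_file="/path/to/electra_small/config.json",        # placeholder path
    pytorch_dump_path="/path/to/output/pytorch_model.bin",   # placeholder path
    discriminator_or_generator="discriminator",              # or "generator"
)
```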
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_electra.py ADDED
@@ -0,0 +1,1686 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ELECTRA model."""
16
+
17
+ import math
18
+ import os
19
+ from dataclasses import dataclass
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN, get_activation
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithCrossAttentions,
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ MaskedLMOutput,
33
+ MultipleChoiceModelOutput,
34
+ QuestionAnsweringModelOutput,
35
+ SequenceClassifierOutput,
36
+ TokenClassifierOutput,
37
+ )
38
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
39
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ replace_return_docstrings,
47
+ )
48
+ from .configuration_electra import ElectraConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
54
+ _CONFIG_FOR_DOC = "ElectraConfig"
55
+
56
+ ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [
57
+ "google/electra-small-generator",
58
+ "google/electra-base-generator",
59
+ "google/electra-large-generator",
60
+ "google/electra-small-discriminator",
61
+ "google/electra-base-discriminator",
62
+ "google/electra-large-discriminator",
63
+ # See all ELECTRA models at https://huggingface.co/models?filter=electra
64
+ ]
65
+
66
+
67
+ def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
68
+ """Load tf checkpoints in a pytorch model."""
69
+ try:
70
+ import re
71
+
72
+ import numpy as np
73
+ import tensorflow as tf
74
+ except ImportError:
75
+ logger.error(
76
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
77
+ "https://www.tensorflow.org/install/ for installation instructions."
78
+ )
79
+ raise
80
+ tf_path = os.path.abspath(tf_checkpoint_path)
81
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
82
+ # Load weights from TF model
83
+ init_vars = tf.train.list_variables(tf_path)
84
+ names = []
85
+ arrays = []
86
+ for name, shape in init_vars:
87
+ logger.info(f"Loading TF weight {name} with shape {shape}")
88
+ array = tf.train.load_variable(tf_path, name)
89
+ names.append(name)
90
+ arrays.append(array)
91
+ for name, array in zip(names, arrays):
92
+ original_name: str = name
93
+
94
+ try:
95
+ if isinstance(model, ElectraForMaskedLM):
96
+ name = name.replace("electra/embeddings/", "generator/embeddings/")
97
+
98
+ if discriminator_or_generator == "generator":
99
+ name = name.replace("electra/", "discriminator/")
100
+ name = name.replace("generator/", "electra/")
101
+
102
+ name = name.replace("dense_1", "dense_prediction")
103
+ name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
104
+
105
+ name = name.split("/")
106
+ # print(original_name, name)
107
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
108
+ # which are not required for using pretrained model
109
+ if any(n in ["global_step", "temperature"] for n in name):
110
+ logger.info(f"Skipping {original_name}")
111
+ continue
112
+ pointer = model
113
+ for m_name in name:
114
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
115
+ scope_names = re.split(r"_(\d+)", m_name)
116
+ else:
117
+ scope_names = [m_name]
118
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
119
+ pointer = getattr(pointer, "weight")
120
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
121
+ pointer = getattr(pointer, "bias")
122
+ elif scope_names[0] == "output_weights":
123
+ pointer = getattr(pointer, "weight")
124
+ elif scope_names[0] == "squad":
125
+ pointer = getattr(pointer, "classifier")
126
+ else:
127
+ pointer = getattr(pointer, scope_names[0])
128
+ if len(scope_names) >= 2:
129
+ num = int(scope_names[1])
130
+ pointer = pointer[num]
131
+ if m_name.endswith("_embeddings"):
132
+ pointer = getattr(pointer, "weight")
133
+ elif m_name == "kernel":
134
+ array = np.transpose(array)
135
+ try:
136
+ if pointer.shape != array.shape:
137
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
138
+ except ValueError as e:
139
+ e.args += (pointer.shape, array.shape)
140
+ raise
141
+ print(f"Initialize PyTorch weight {name}", original_name)
142
+ pointer.data = torch.from_numpy(array)
143
+ except AttributeError as e:
144
+ print(f"Skipping {original_name}", name, e)
145
+ continue
146
+ return model
147
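For context, `load_tf_weights_in_electra` is typically driven by a conversion script rather than called directly; a hedged sketch of that flow (the config and checkpoint paths below are placeholders, and TensorFlow must be installed for the weight loading itself) looks like:

```python
from transformers import ElectraConfig, ElectraForPreTraining
from transformers.models.electra.modeling_electra import load_tf_weights_in_electra

# Placeholder paths for an original TF ELECTRA checkpoint and its config.
config = ElectraConfig.from_json_file("path/to/electra_config.json")
model = ElectraForPreTraining(config)

load_tf_weights_in_electra(
    model, config, "path/to/tf_checkpoint", discriminator_or_generator="discriminator"
)
model.save_pretrained("path/to/pytorch_dump")  # placeholder output directory
```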
+
148
+
149
+ class ElectraEmbeddings(nn.Module):
150
+ """Construct the embeddings from word, position and token_type embeddings."""
151
+
152
+ def __init__(self, config):
153
+ super().__init__()
154
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
155
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
156
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
157
+
158
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
159
+ # any TensorFlow checkpoint file
160
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
161
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
162
+
163
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
164
+ self.register_buffer(
165
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
166
+ )
167
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
168
+ self.register_buffer(
169
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
170
+ )
171
+
172
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
173
+ def forward(
174
+ self,
175
+ input_ids: Optional[torch.LongTensor] = None,
176
+ token_type_ids: Optional[torch.LongTensor] = None,
177
+ position_ids: Optional[torch.LongTensor] = None,
178
+ inputs_embeds: Optional[torch.FloatTensor] = None,
179
+ past_key_values_length: int = 0,
180
+ ) -> torch.Tensor:
181
+ if input_ids is not None:
182
+ input_shape = input_ids.size()
183
+ else:
184
+ input_shape = inputs_embeds.size()[:-1]
185
+
186
+ seq_length = input_shape[1]
187
+
188
+ if position_ids is None:
189
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
190
+
191
+ # Set token_type_ids to the registered buffer defined in the constructor, which is all zeros. This usually happens
+ # when token_type_ids is auto-generated; the registered buffer helps users trace the model without passing
+ # token_type_ids, and solves issue #5664
194
+ if token_type_ids is None:
195
+ if hasattr(self, "token_type_ids"):
196
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
197
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
198
+ token_type_ids = buffered_token_type_ids_expanded
199
+ else:
200
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
201
+
202
+ if inputs_embeds is None:
203
+ inputs_embeds = self.word_embeddings(input_ids)
204
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
205
+
206
+ embeddings = inputs_embeds + token_type_embeddings
207
+ if self.position_embedding_type == "absolute":
208
+ position_embeddings = self.position_embeddings(position_ids)
209
+ embeddings += position_embeddings
210
+ embeddings = self.LayerNorm(embeddings)
211
+ embeddings = self.dropout(embeddings)
212
+ return embeddings
213
+
214
+
215
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra
216
+ class ElectraSelfAttention(nn.Module):
217
+ def __init__(self, config, position_embedding_type=None):
218
+ super().__init__()
219
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
220
+ raise ValueError(
221
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
222
+ f"heads ({config.num_attention_heads})"
223
+ )
224
+
225
+ self.num_attention_heads = config.num_attention_heads
226
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
227
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
228
+
229
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
230
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
231
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
232
+
233
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
234
+ self.position_embedding_type = position_embedding_type or getattr(
235
+ config, "position_embedding_type", "absolute"
236
+ )
237
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
238
+ self.max_position_embeddings = config.max_position_embeddings
239
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
240
+
241
+ self.is_decoder = config.is_decoder
242
+
243
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
244
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
245
+ x = x.view(new_x_shape)
246
+ return x.permute(0, 2, 1, 3)
247
+
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.Tensor,
251
+ attention_mask: Optional[torch.FloatTensor] = None,
252
+ head_mask: Optional[torch.FloatTensor] = None,
253
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
254
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
255
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
256
+ output_attentions: Optional[bool] = False,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ use_cache = past_key_value is not None
286
+ if self.is_decoder:
287
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
288
+ # Further calls to cross_attention layer can then reuse all cross-attention
289
+ # key/value_states (first "if" case)
290
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
291
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
292
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
293
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
294
+ past_key_value = (key_layer, value_layer)
295
+
296
+ # Take the dot product between "query" and "key" to get the raw attention scores.
297
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
298
+
299
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
300
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
301
+ if use_cache:
302
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
303
+ -1, 1
304
+ )
305
+ else:
306
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
307
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
308
+ distance = position_ids_l - position_ids_r
309
+
310
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
311
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
312
+
313
+ if self.position_embedding_type == "relative_key":
314
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
315
+ attention_scores = attention_scores + relative_position_scores
316
+ elif self.position_embedding_type == "relative_key_query":
317
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
318
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
319
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
320
+
321
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
322
+ if attention_mask is not None:
323
+ # Apply the attention mask (precomputed for all layers in the ElectraModel forward() function)
324
+ attention_scores = attention_scores + attention_mask
325
+
326
+ # Normalize the attention scores to probabilities.
327
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
328
+
329
+ # This is actually dropping out entire tokens to attend to, which might
330
+ # seem a bit unusual, but is taken from the original Transformer paper.
331
+ attention_probs = self.dropout(attention_probs)
332
+
333
+ # Mask heads if we want to
334
+ if head_mask is not None:
335
+ attention_probs = attention_probs * head_mask
336
+
337
+ context_layer = torch.matmul(attention_probs, value_layer)
338
+
339
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
340
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
341
+ context_layer = context_layer.view(new_context_layer_shape)
342
+
343
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
344
+
345
+ if self.is_decoder:
346
+ outputs = outputs + (past_key_value,)
347
+ return outputs
348
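To illustrate the shapes this attention block works with, here is a small sketch that runs it in isolation (illustrative sizes; in practice the module is only instantiated inside `ElectraAttention`):

```python
import torch
from transformers import ElectraConfig
from transformers.models.electra.modeling_electra import ElectraSelfAttention

config = ElectraConfig(hidden_size=64, num_attention_heads=4)
attention = ElectraSelfAttention(config)

hidden_states = torch.randn(2, 10, config.hidden_size)  # (batch, seq_len, hidden_size)
(context,) = attention(hidden_states)  # no mask, no cache, no attention outputs
print(context.shape)  # torch.Size([2, 10, 64])
```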
+
349
+
350
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
351
+ class ElectraSelfOutput(nn.Module):
352
+ def __init__(self, config):
353
+ super().__init__()
354
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
355
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
356
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
357
+
358
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
359
+ hidden_states = self.dense(hidden_states)
360
+ hidden_states = self.dropout(hidden_states)
361
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
362
+ return hidden_states
363
+
364
+
365
+ # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra
366
+ class ElectraAttention(nn.Module):
367
+ def __init__(self, config, position_embedding_type=None):
368
+ super().__init__()
369
+ self.self = ElectraSelfAttention(config, position_embedding_type=position_embedding_type)
370
+ self.output = ElectraSelfOutput(config)
371
+ self.pruned_heads = set()
372
+
373
+ def prune_heads(self, heads):
374
+ if len(heads) == 0:
375
+ return
376
+ heads, index = find_pruneable_heads_and_indices(
377
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
378
+ )
379
+
380
+ # Prune linear layers
381
+ self.self.query = prune_linear_layer(self.self.query, index)
382
+ self.self.key = prune_linear_layer(self.self.key, index)
383
+ self.self.value = prune_linear_layer(self.self.value, index)
384
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
385
+
386
+ # Update hyper params and store pruned heads
387
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
388
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
389
+ self.pruned_heads = self.pruned_heads.union(heads)
390
+
391
+ def forward(
392
+ self,
393
+ hidden_states: torch.Tensor,
394
+ attention_mask: Optional[torch.FloatTensor] = None,
395
+ head_mask: Optional[torch.FloatTensor] = None,
396
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
397
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
398
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
399
+ output_attentions: Optional[bool] = False,
400
+ ) -> Tuple[torch.Tensor]:
401
+ self_outputs = self.self(
402
+ hidden_states,
403
+ attention_mask,
404
+ head_mask,
405
+ encoder_hidden_states,
406
+ encoder_attention_mask,
407
+ past_key_value,
408
+ output_attentions,
409
+ )
410
+ attention_output = self.output(self_outputs[0], hidden_states)
411
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
412
+ return outputs
413
+
414
+
415
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
416
+ class ElectraIntermediate(nn.Module):
417
+ def __init__(self, config):
418
+ super().__init__()
419
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
420
+ if isinstance(config.hidden_act, str):
421
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
422
+ else:
423
+ self.intermediate_act_fn = config.hidden_act
424
+
425
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.intermediate_act_fn(hidden_states)
428
+ return hidden_states
429
+
430
+
431
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
432
+ class ElectraOutput(nn.Module):
433
+ def __init__(self, config):
434
+ super().__init__()
435
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
436
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
437
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
438
+
439
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
440
+ hidden_states = self.dense(hidden_states)
441
+ hidden_states = self.dropout(hidden_states)
442
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
443
+ return hidden_states
444
+
445
+
446
+ # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra
447
+ class ElectraLayer(nn.Module):
448
+ def __init__(self, config):
449
+ super().__init__()
450
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
451
+ self.seq_len_dim = 1
452
+ self.attention = ElectraAttention(config)
453
+ self.is_decoder = config.is_decoder
454
+ self.add_cross_attention = config.add_cross_attention
455
+ if self.add_cross_attention:
456
+ if not self.is_decoder:
457
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
458
+ self.crossattention = ElectraAttention(config, position_embedding_type="absolute")
459
+ self.intermediate = ElectraIntermediate(config)
460
+ self.output = ElectraOutput(config)
461
+
462
+ def forward(
463
+ self,
464
+ hidden_states: torch.Tensor,
465
+ attention_mask: Optional[torch.FloatTensor] = None,
466
+ head_mask: Optional[torch.FloatTensor] = None,
467
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
468
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
469
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
470
+ output_attentions: Optional[bool] = False,
471
+ ) -> Tuple[torch.Tensor]:
472
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
473
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
474
+ self_attention_outputs = self.attention(
475
+ hidden_states,
476
+ attention_mask,
477
+ head_mask,
478
+ output_attentions=output_attentions,
479
+ past_key_value=self_attn_past_key_value,
480
+ )
481
+ attention_output = self_attention_outputs[0]
482
+
483
+ # if decoder, the last output is tuple of self-attn cache
484
+ if self.is_decoder:
485
+ outputs = self_attention_outputs[1:-1]
486
+ present_key_value = self_attention_outputs[-1]
487
+ else:
488
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
489
+
490
+ cross_attn_present_key_value = None
491
+ if self.is_decoder and encoder_hidden_states is not None:
492
+ if not hasattr(self, "crossattention"):
493
+ raise ValueError(
494
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
495
+ " by setting `config.add_cross_attention=True`"
496
+ )
497
+
498
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
499
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
500
+ cross_attention_outputs = self.crossattention(
501
+ attention_output,
502
+ attention_mask,
503
+ head_mask,
504
+ encoder_hidden_states,
505
+ encoder_attention_mask,
506
+ cross_attn_past_key_value,
507
+ output_attentions,
508
+ )
509
+ attention_output = cross_attention_outputs[0]
510
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
511
+
512
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
513
+ cross_attn_present_key_value = cross_attention_outputs[-1]
514
+ present_key_value = present_key_value + cross_attn_present_key_value
515
+
516
+ layer_output = apply_chunking_to_forward(
517
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
518
+ )
519
+ outputs = (layer_output,) + outputs
520
+
521
+ # if decoder, return the attn key/values as the last output
522
+ if self.is_decoder:
523
+ outputs = outputs + (present_key_value,)
524
+
525
+ return outputs
526
+
527
+ def feed_forward_chunk(self, attention_output):
528
+ intermediate_output = self.intermediate(attention_output)
529
+ layer_output = self.output(intermediate_output, attention_output)
530
+ return layer_output
531
+
532
+
533
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra
534
+ class ElectraEncoder(nn.Module):
535
+ def __init__(self, config):
536
+ super().__init__()
537
+ self.config = config
538
+ self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)])
539
+ self.gradient_checkpointing = False
540
+
541
+ def forward(
542
+ self,
543
+ hidden_states: torch.Tensor,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ head_mask: Optional[torch.FloatTensor] = None,
546
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
547
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
548
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
549
+ use_cache: Optional[bool] = None,
550
+ output_attentions: Optional[bool] = False,
551
+ output_hidden_states: Optional[bool] = False,
552
+ return_dict: Optional[bool] = True,
553
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
554
+ all_hidden_states = () if output_hidden_states else None
555
+ all_self_attentions = () if output_attentions else None
556
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
557
+
558
+ if self.gradient_checkpointing and self.training:
559
+ if use_cache:
560
+ logger.warning_once(
561
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
562
+ )
563
+ use_cache = False
564
+
565
+ next_decoder_cache = () if use_cache else None
566
+ for i, layer_module in enumerate(self.layer):
567
+ if output_hidden_states:
568
+ all_hidden_states = all_hidden_states + (hidden_states,)
569
+
570
+ layer_head_mask = head_mask[i] if head_mask is not None else None
571
+ past_key_value = past_key_values[i] if past_key_values is not None else None
572
+
573
+ if self.gradient_checkpointing and self.training:
574
+ layer_outputs = self._gradient_checkpointing_func(
575
+ layer_module.__call__,
576
+ hidden_states,
577
+ attention_mask,
578
+ layer_head_mask,
579
+ encoder_hidden_states,
580
+ encoder_attention_mask,
581
+ past_key_value,
582
+ output_attentions,
583
+ )
584
+ else:
585
+ layer_outputs = layer_module(
586
+ hidden_states,
587
+ attention_mask,
588
+ layer_head_mask,
589
+ encoder_hidden_states,
590
+ encoder_attention_mask,
591
+ past_key_value,
592
+ output_attentions,
593
+ )
594
+
595
+ hidden_states = layer_outputs[0]
596
+ if use_cache:
597
+ next_decoder_cache += (layer_outputs[-1],)
598
+ if output_attentions:
599
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
600
+ if self.config.add_cross_attention:
601
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
602
+
603
+ if output_hidden_states:
604
+ all_hidden_states = all_hidden_states + (hidden_states,)
605
+
606
+ if not return_dict:
607
+ return tuple(
608
+ v
609
+ for v in [
610
+ hidden_states,
611
+ next_decoder_cache,
612
+ all_hidden_states,
613
+ all_self_attentions,
614
+ all_cross_attentions,
615
+ ]
616
+ if v is not None
617
+ )
618
+ return BaseModelOutputWithPastAndCrossAttentions(
619
+ last_hidden_state=hidden_states,
620
+ past_key_values=next_decoder_cache,
621
+ hidden_states=all_hidden_states,
622
+ attentions=all_self_attentions,
623
+ cross_attentions=all_cross_attentions,
624
+ )
625
+
626
+
627
+ class ElectraDiscriminatorPredictions(nn.Module):
628
+ """Prediction module for the discriminator, made up of two dense layers."""
629
+
630
+ def __init__(self, config):
631
+ super().__init__()
632
+
633
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
634
+ self.activation = get_activation(config.hidden_act)
635
+ self.dense_prediction = nn.Linear(config.hidden_size, 1)
636
+ self.config = config
637
+
638
+ def forward(self, discriminator_hidden_states):
639
+ hidden_states = self.dense(discriminator_hidden_states)
640
+ hidden_states = self.activation(hidden_states)
641
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
642
+
643
+ return logits
644
+
645
+
646
+ class ElectraGeneratorPredictions(nn.Module):
647
+ """Prediction module for the generator, made up of two dense layers."""
648
+
649
+ def __init__(self, config):
650
+ super().__init__()
651
+
652
+ self.activation = get_activation("gelu")
653
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
654
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
655
+
656
+ def forward(self, generator_hidden_states):
657
+ hidden_states = self.dense(generator_hidden_states)
658
+ hidden_states = self.activation(hidden_states)
659
+ hidden_states = self.LayerNorm(hidden_states)
660
+
661
+ return hidden_states
662
+
663
+
664
+ class ElectraPreTrainedModel(PreTrainedModel):
665
+ """
666
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
667
+ models.
668
+ """
669
+
670
+ config_class = ElectraConfig
671
+ load_tf_weights = load_tf_weights_in_electra
672
+ base_model_prefix = "electra"
673
+ supports_gradient_checkpointing = True
674
+
675
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
676
+ def _init_weights(self, module):
677
+ """Initialize the weights"""
678
+ if isinstance(module, nn.Linear):
679
+ # Slightly different from the TF version which uses truncated_normal for initialization
680
+ # cf https://github.com/pytorch/pytorch/pull/5617
681
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
682
+ if module.bias is not None:
683
+ module.bias.data.zero_()
684
+ elif isinstance(module, nn.Embedding):
685
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
686
+ if module.padding_idx is not None:
687
+ module.weight.data[module.padding_idx].zero_()
688
+ elif isinstance(module, nn.LayerNorm):
689
+ module.bias.data.zero_()
690
+ module.weight.data.fill_(1.0)
691
+
692
+
693
+ @dataclass
694
+ class ElectraForPreTrainingOutput(ModelOutput):
695
+ """
696
+ Output type of [`ElectraForPreTraining`].
697
+
698
+ Args:
699
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
700
+ Total loss of the ELECTRA objective.
701
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
702
+ Prediction scores of the head (scores for each token before SoftMax).
703
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
704
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
705
+ shape `(batch_size, sequence_length, hidden_size)`.
706
+
707
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
708
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
709
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
710
+ sequence_length)`.
711
+
712
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
713
+ heads.
714
+ """
715
+
716
+ loss: Optional[torch.FloatTensor] = None
717
+ logits: torch.FloatTensor = None
718
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
719
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
720
+
721
+
722
+ ELECTRA_START_DOCSTRING = r"""
723
+
724
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
725
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
726
+ etc.)
727
+
728
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
729
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
730
+ and behavior.
731
+
732
+ Parameters:
733
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
734
+ Initializing with a config file does not load the weights associated with the model, only the
735
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
736
+ """
737
+
738
+ ELECTRA_INPUTS_DOCSTRING = r"""
739
+ Args:
740
+ input_ids (`torch.LongTensor` of shape `({0})`):
741
+ Indices of input sequence tokens in the vocabulary.
742
+
743
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
744
+ [`PreTrainedTokenizer.__call__`] for details.
745
+
746
+ [What are input IDs?](../glossary#input-ids)
747
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
748
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
749
+
750
+ - 1 for tokens that are **not masked**,
751
+ - 0 for tokens that are **masked**.
752
+
753
+ [What are attention masks?](../glossary#attention-mask)
754
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
755
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
756
+ 1]`:
757
+
758
+ - 0 corresponds to a *sentence A* token,
759
+ - 1 corresponds to a *sentence B* token.
760
+
761
+ [What are token type IDs?](../glossary#token-type-ids)
762
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
763
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
764
+ config.max_position_embeddings - 1]`.
765
+
766
+ [What are position IDs?](../glossary#position-ids)
767
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
768
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
769
+
770
+ - 1 indicates the head is **not masked**,
771
+ - 0 indicates the head is **masked**.
772
+
773
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
774
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
775
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
776
+ model's internal embedding lookup matrix.
777
+ encoder_hidden_states (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
778
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
779
+ the model is configured as a decoder.
780
+ encoder_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
781
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
782
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
783
+
784
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
786
+
787
+ output_attentions (`bool`, *optional*):
788
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
789
+ tensors for more detail.
790
+ output_hidden_states (`bool`, *optional*):
791
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
792
+ more detail.
793
+ return_dict (`bool`, *optional*):
794
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
795
+ """
796
+
797
+
798
+ @add_start_docstrings(
799
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
800
+ "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
801
+ "hidden size and embedding size are different. "
802
+ ""
803
+ "Both the generator and discriminator checkpoints may be loaded into this model.",
804
+ ELECTRA_START_DOCSTRING,
805
+ )
806
+ class ElectraModel(ElectraPreTrainedModel):
807
+ def __init__(self, config):
808
+ super().__init__(config)
809
+ self.embeddings = ElectraEmbeddings(config)
810
+
811
+ if config.embedding_size != config.hidden_size:
812
+ self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
813
+
814
+ self.encoder = ElectraEncoder(config)
815
+ self.config = config
816
+ # Initialize weights and apply final processing
817
+ self.post_init()
818
+
819
+ def get_input_embeddings(self):
820
+ return self.embeddings.word_embeddings
821
+
822
+ def set_input_embeddings(self, value):
823
+ self.embeddings.word_embeddings = value
824
+
825
+ def _prune_heads(self, heads_to_prune):
826
+ """
827
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
828
+ class PreTrainedModel
829
+ """
830
+ for layer, heads in heads_to_prune.items():
831
+ self.encoder.layer[layer].attention.prune_heads(heads)
832
+
833
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
834
+ @add_code_sample_docstrings(
835
+ checkpoint=_CHECKPOINT_FOR_DOC,
836
+ output_type=BaseModelOutputWithCrossAttentions,
837
+ config_class=_CONFIG_FOR_DOC,
838
+ )
839
+ def forward(
840
+ self,
841
+ input_ids: Optional[torch.Tensor] = None,
842
+ attention_mask: Optional[torch.Tensor] = None,
843
+ token_type_ids: Optional[torch.Tensor] = None,
844
+ position_ids: Optional[torch.Tensor] = None,
845
+ head_mask: Optional[torch.Tensor] = None,
846
+ inputs_embeds: Optional[torch.Tensor] = None,
847
+ encoder_hidden_states: Optional[torch.Tensor] = None,
848
+ encoder_attention_mask: Optional[torch.Tensor] = None,
849
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
850
+ use_cache: Optional[bool] = None,
851
+ output_attentions: Optional[bool] = None,
852
+ output_hidden_states: Optional[bool] = None,
853
+ return_dict: Optional[bool] = None,
854
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
858
+ )
859
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
860
+
861
+ if input_ids is not None and inputs_embeds is not None:
862
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
863
+ elif input_ids is not None:
864
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
865
+ input_shape = input_ids.size()
866
+ elif inputs_embeds is not None:
867
+ input_shape = inputs_embeds.size()[:-1]
868
+ else:
869
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
870
+
871
+ batch_size, seq_length = input_shape
872
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
873
+
874
+ # past_key_values_length
875
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
876
+
877
+ if attention_mask is None:
878
+ attention_mask = torch.ones(input_shape, device=device)
879
+ if token_type_ids is None:
880
+ if hasattr(self.embeddings, "token_type_ids"):
881
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
882
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
883
+ token_type_ids = buffered_token_type_ids_expanded
884
+ else:
885
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
886
+
887
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
888
+
889
+ # If a 2D or 3D attention mask is provided for the cross-attention
890
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
891
+ if self.config.is_decoder and encoder_hidden_states is not None:
892
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
893
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
894
+ if encoder_attention_mask is None:
895
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
896
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
897
+ else:
898
+ encoder_extended_attention_mask = None
899
+
900
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
901
+
902
+ hidden_states = self.embeddings(
903
+ input_ids=input_ids,
904
+ position_ids=position_ids,
905
+ token_type_ids=token_type_ids,
906
+ inputs_embeds=inputs_embeds,
907
+ past_key_values_length=past_key_values_length,
908
+ )
909
+
910
+ if hasattr(self, "embeddings_project"):
911
+ hidden_states = self.embeddings_project(hidden_states)
912
+
913
+ hidden_states = self.encoder(
914
+ hidden_states,
915
+ attention_mask=extended_attention_mask,
916
+ head_mask=head_mask,
917
+ encoder_hidden_states=encoder_hidden_states,
918
+ encoder_attention_mask=encoder_extended_attention_mask,
919
+ past_key_values=past_key_values,
920
+ use_cache=use_cache,
921
+ output_attentions=output_attentions,
922
+ output_hidden_states=output_hidden_states,
923
+ return_dict=return_dict,
924
+ )
925
+
926
+ return hidden_states
927
+
928
+
929
+ class ElectraClassificationHead(nn.Module):
930
+ """Head for sentence-level classification tasks."""
931
+
932
+ def __init__(self, config):
933
+ super().__init__()
934
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
935
+ classifier_dropout = (
936
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
937
+ )
938
+ self.activation = get_activation("gelu")
939
+ self.dropout = nn.Dropout(classifier_dropout)
940
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
941
+
942
+ def forward(self, features, **kwargs):
943
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
944
+ x = self.dropout(x)
945
+ x = self.dense(x)
946
+ x = self.activation(x)  # although BERT uses tanh here, it seems the ELECTRA authors used gelu here
947
+ x = self.dropout(x)
948
+ x = self.out_proj(x)
949
+ return x
950
+
951
+
952
+ @add_start_docstrings(
953
+ """
954
+ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
955
+ pooled output) e.g. for GLUE tasks.
956
+ """,
957
+ ELECTRA_START_DOCSTRING,
958
+ )
959
+ class ElectraForSequenceClassification(ElectraPreTrainedModel):
960
+ def __init__(self, config):
961
+ super().__init__(config)
962
+ self.num_labels = config.num_labels
963
+ self.config = config
964
+ self.electra = ElectraModel(config)
965
+ self.classifier = ElectraClassificationHead(config)
966
+
967
+ # Initialize weights and apply final processing
968
+ self.post_init()
969
+
970
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
971
+ @add_code_sample_docstrings(
972
+ checkpoint="bhadresh-savani/electra-base-emotion",
973
+ output_type=SequenceClassifierOutput,
974
+ config_class=_CONFIG_FOR_DOC,
975
+ expected_output="'joy'",
976
+ expected_loss=0.06,
977
+ )
978
+ def forward(
979
+ self,
980
+ input_ids: Optional[torch.Tensor] = None,
981
+ attention_mask: Optional[torch.Tensor] = None,
982
+ token_type_ids: Optional[torch.Tensor] = None,
983
+ position_ids: Optional[torch.Tensor] = None,
984
+ head_mask: Optional[torch.Tensor] = None,
985
+ inputs_embeds: Optional[torch.Tensor] = None,
986
+ labels: Optional[torch.Tensor] = None,
987
+ output_attentions: Optional[bool] = None,
988
+ output_hidden_states: Optional[bool] = None,
989
+ return_dict: Optional[bool] = None,
990
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
991
+ r"""
992
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
993
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
994
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
995
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
996
+ """
997
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
998
+
999
+ discriminator_hidden_states = self.electra(
1000
+ input_ids,
1001
+ attention_mask=attention_mask,
1002
+ token_type_ids=token_type_ids,
1003
+ position_ids=position_ids,
1004
+ head_mask=head_mask,
1005
+ inputs_embeds=inputs_embeds,
1006
+ output_attentions=output_attentions,
1007
+ output_hidden_states=output_hidden_states,
1008
+ return_dict=return_dict,
1009
+ )
1010
+
1011
+ sequence_output = discriminator_hidden_states[0]
1012
+ logits = self.classifier(sequence_output)
1013
+
1014
+ loss = None
1015
+ if labels is not None:
1016
+ if self.config.problem_type is None:
1017
+ if self.num_labels == 1:
1018
+ self.config.problem_type = "regression"
1019
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1020
+ self.config.problem_type = "single_label_classification"
1021
+ else:
1022
+ self.config.problem_type = "multi_label_classification"
1023
+
1024
+ if self.config.problem_type == "regression":
1025
+ loss_fct = MSELoss()
1026
+ if self.num_labels == 1:
1027
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1028
+ else:
1029
+ loss = loss_fct(logits, labels)
1030
+ elif self.config.problem_type == "single_label_classification":
1031
+ loss_fct = CrossEntropyLoss()
1032
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1033
+ elif self.config.problem_type == "multi_label_classification":
1034
+ loss_fct = BCEWithLogitsLoss()
1035
+ loss = loss_fct(logits, labels)
1036
+
1037
+ if not return_dict:
1038
+ output = (logits,) + discriminator_hidden_states[1:]
1039
+ return ((loss,) + output) if loss is not None else output
1040
+
1041
+ return SequenceClassifierOutput(
1042
+ loss=loss,
1043
+ logits=logits,
1044
+ hidden_states=discriminator_hidden_states.hidden_states,
1045
+ attentions=discriminator_hidden_states.attentions,
1046
+ )
1047
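The `problem_type` logic above keys off `num_labels` and the dtype of `labels`; a minimal sketch of the single-label path (the classification head is freshly initialized on top of the pretrained encoder, so the logits here are untrained):

```python
import torch
from transformers import AutoTokenizer, ElectraForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForSequenceClassification.from_pretrained(
    "google/electra-small-discriminator", num_labels=2
)

inputs = tokenizer("A single training sentence.", return_tensors="pt")
labels = torch.tensor([1])  # long dtype + num_labels > 1 -> single_label_classification
outputs = model(**inputs, labels=labels)

print(outputs.loss.item(), outputs.logits.shape)  # scalar loss, torch.Size([1, 2])
```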
+
1048
+
1049
+ @add_start_docstrings(
1050
+ """
1051
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1052
+
1053
+ It is recommended to load the discriminator checkpoint into this model.
1054
+ """,
1055
+ ELECTRA_START_DOCSTRING,
1056
+ )
1057
+ class ElectraForPreTraining(ElectraPreTrainedModel):
1058
+ def __init__(self, config):
1059
+ super().__init__(config)
1060
+
1061
+ self.electra = ElectraModel(config)
1062
+ self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
1063
+ # Initialize weights and apply final processing
1064
+ self.post_init()
1065
+
1066
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1067
+ @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1068
+ def forward(
1069
+ self,
1070
+ input_ids: Optional[torch.Tensor] = None,
1071
+ attention_mask: Optional[torch.Tensor] = None,
1072
+ token_type_ids: Optional[torch.Tensor] = None,
1073
+ position_ids: Optional[torch.Tensor] = None,
1074
+ head_mask: Optional[torch.Tensor] = None,
1075
+ inputs_embeds: Optional[torch.Tensor] = None,
1076
+ labels: Optional[torch.Tensor] = None,
1077
+ output_attentions: Optional[bool] = None,
1078
+ output_hidden_states: Optional[bool] = None,
1079
+ return_dict: Optional[bool] = None,
1080
+ ) -> Union[Tuple[torch.Tensor], ElectraForPreTrainingOutput]:
1081
+ r"""
1082
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1083
+ Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring)
1084
+ Indices should be in `[0, 1]`:
1085
+
1086
+ - 0 indicates the token is an original token,
1087
+ - 1 indicates the token was replaced.
1088
+
1089
+ Returns:
1090
+
1091
+ Examples:
1092
+
1093
+ ```python
1094
+ >>> from transformers import ElectraForPreTraining, AutoTokenizer
1095
+ >>> import torch
1096
+
1097
+ >>> discriminator = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")
1098
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")
1099
+
1100
+ >>> sentence = "The quick brown fox jumps over the lazy dog"
1101
+ >>> fake_sentence = "The quick brown fox fake over the lazy dog"
1102
+
1103
+ >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)
1104
+ >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
1105
+ >>> discriminator_outputs = discriminator(fake_inputs)
1106
+ >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)
1107
+
1108
+ >>> fake_tokens
1109
+ ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']
1110
+
1111
+ >>> predictions.squeeze().tolist()
1112
+ [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1113
+ ```"""
1114
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1115
+
1116
+ discriminator_hidden_states = self.electra(
1117
+ input_ids,
1118
+ attention_mask=attention_mask,
1119
+ token_type_ids=token_type_ids,
1120
+ position_ids=position_ids,
1121
+ head_mask=head_mask,
1122
+ inputs_embeds=inputs_embeds,
1123
+ output_attentions=output_attentions,
1124
+ output_hidden_states=output_hidden_states,
1125
+ return_dict=return_dict,
1126
+ )
1127
+ discriminator_sequence_output = discriminator_hidden_states[0]
1128
+
1129
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1130
+
1131
+ loss = None
1132
+ if labels is not None:
1133
+ loss_fct = nn.BCEWithLogitsLoss()
1134
+ if attention_mask is not None:
1135
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
1136
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
1137
+ active_labels = labels[active_loss]
1138
+ loss = loss_fct(active_logits, active_labels.float())
1139
+ else:
1140
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
1141
+
1142
+ if not return_dict:
1143
+ output = (logits,) + discriminator_hidden_states[1:]
1144
+ return ((loss,) + output) if loss is not None else output
1145
+
1146
+ return ElectraForPreTrainingOutput(
1147
+ loss=loss,
1148
+ logits=logits,
1149
+ hidden_states=discriminator_hidden_states.hidden_states,
1150
+ attentions=discriminator_hidden_states.attentions,
1151
+ )
1152
+
1153
+
1154
+ @add_start_docstrings(
1155
+ """
1156
+ Electra model with a language modeling head on top.
1157
+
1158
+ Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
1159
+ the two to have been trained for the masked language modeling task.
1160
+ """,
1161
+ ELECTRA_START_DOCSTRING,
1162
+ )
1163
+ class ElectraForMaskedLM(ElectraPreTrainedModel):
1164
+ _tied_weights_keys = ["generator_lm_head.weight"]
1165
+
1166
+ def __init__(self, config):
1167
+ super().__init__(config)
1168
+
1169
+ self.electra = ElectraModel(config)
1170
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1171
+
1172
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1173
+ # Initialize weights and apply final processing
1174
+ self.post_init()
1175
+
1176
+ def get_output_embeddings(self):
1177
+ return self.generator_lm_head
1178
+
1179
+ def set_output_embeddings(self, word_embeddings):
1180
+ self.generator_lm_head = word_embeddings
1181
+
1182
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1183
+ @add_code_sample_docstrings(
1184
+ checkpoint="google/electra-small-generator",
1185
+ output_type=MaskedLMOutput,
1186
+ config_class=_CONFIG_FOR_DOC,
1187
+ mask="[MASK]",
1188
+ expected_output="'paris'",
1189
+ expected_loss=1.22,
1190
+ )
1191
+ def forward(
1192
+ self,
1193
+ input_ids: Optional[torch.Tensor] = None,
1194
+ attention_mask: Optional[torch.Tensor] = None,
1195
+ token_type_ids: Optional[torch.Tensor] = None,
1196
+ position_ids: Optional[torch.Tensor] = None,
1197
+ head_mask: Optional[torch.Tensor] = None,
1198
+ inputs_embeds: Optional[torch.Tensor] = None,
1199
+ labels: Optional[torch.Tensor] = None,
1200
+ output_attentions: Optional[bool] = None,
1201
+ output_hidden_states: Optional[bool] = None,
1202
+ return_dict: Optional[bool] = None,
1203
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1204
+ r"""
1205
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1206
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1207
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1208
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1209
+ """
1210
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1211
+
1212
+ generator_hidden_states = self.electra(
1213
+ input_ids,
1214
+ attention_mask=attention_mask,
1215
+ token_type_ids=token_type_ids,
1216
+ position_ids=position_ids,
1217
+ head_mask=head_mask,
1218
+ inputs_embeds=inputs_embeds,
1219
+ output_attentions=output_attentions,
1220
+ output_hidden_states=output_hidden_states,
1221
+ return_dict=return_dict,
1222
+ )
1223
+ generator_sequence_output = generator_hidden_states[0]
1224
+
1225
+ prediction_scores = self.generator_predictions(generator_sequence_output)
1226
+ prediction_scores = self.generator_lm_head(prediction_scores)
1227
+
1228
+ loss = None
1229
+ # Masked language modeling softmax layer
1230
+ if labels is not None:
1231
+ loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
1232
+ loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1233
+
1234
+ if not return_dict:
1235
+ output = (prediction_scores,) + generator_hidden_states[1:]
1236
+ return ((loss,) + output) if loss is not None else output
1237
+
1238
+ return MaskedLMOutput(
1239
+ loss=loss,
1240
+ logits=prediction_scores,
1241
+ hidden_states=generator_hidden_states.hidden_states,
1242
+ attentions=generator_hidden_states.attentions,
1243
+ )
1244
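As the docstring above notes, the generator checkpoint is the one trained for masked language modeling; a minimal fill-mask sketch with the small generator:

```python
import torch
from transformers import AutoTokenizer, ElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)
predicted_ids = logits[mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))  # e.g. "paris"
```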
+
1245
+
1246
+ @add_start_docstrings(
1247
+ """
1248
+ Electra model with a token classification head on top.
1249
+
1250
+ Both the discriminator and generator may be loaded into this model.
1251
+ """,
1252
+ ELECTRA_START_DOCSTRING,
1253
+ )
1254
+ class ElectraForTokenClassification(ElectraPreTrainedModel):
1255
+ def __init__(self, config):
1256
+ super().__init__(config)
1257
+ self.num_labels = config.num_labels
1258
+
1259
+ self.electra = ElectraModel(config)
1260
+ classifier_dropout = (
1261
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1262
+ )
1263
+ self.dropout = nn.Dropout(classifier_dropout)
1264
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1265
+ # Initialize weights and apply final processing
1266
+ self.post_init()
1267
+
1268
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1269
+ @add_code_sample_docstrings(
1270
+ checkpoint="bhadresh-savani/electra-base-discriminator-finetuned-conll03-english",
1271
+ output_type=TokenClassifierOutput,
1272
+ config_class=_CONFIG_FOR_DOC,
1273
+ expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']",
1274
+ expected_loss=0.11,
1275
+ )
1276
+ def forward(
1277
+ self,
1278
+ input_ids: Optional[torch.Tensor] = None,
1279
+ attention_mask: Optional[torch.Tensor] = None,
1280
+ token_type_ids: Optional[torch.Tensor] = None,
1281
+ position_ids: Optional[torch.Tensor] = None,
1282
+ head_mask: Optional[torch.Tensor] = None,
1283
+ inputs_embeds: Optional[torch.Tensor] = None,
1284
+ labels: Optional[torch.Tensor] = None,
1285
+ output_attentions: Optional[bool] = None,
1286
+ output_hidden_states: Optional[bool] = None,
1287
+ return_dict: Optional[bool] = None,
1288
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1289
+ r"""
1290
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1291
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1292
+ """
1293
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1294
+
1295
+ discriminator_hidden_states = self.electra(
1296
+ input_ids,
1297
+ attention_mask=attention_mask,
1298
+ token_type_ids=token_type_ids,
1299
+ position_ids=position_ids,
1300
+ head_mask=head_mask,
1301
+ inputs_embeds=inputs_embeds,
1302
+ output_attentions=output_attentions,
1303
+ output_hidden_states=output_hidden_states,
1304
+ return_dict=return_dict,
1305
+ )
1306
+ discriminator_sequence_output = discriminator_hidden_states[0]
1307
+
1308
+ discriminator_sequence_output = self.dropout(discriminator_sequence_output)
1309
+ logits = self.classifier(discriminator_sequence_output)
1310
+
1311
+ loss = None
1312
+ if labels is not None:
1313
+ loss_fct = CrossEntropyLoss()
1314
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1315
+
1316
+ if not return_dict:
1317
+ output = (logits,) + discriminator_hidden_states[1:]
1318
+ return ((loss,) + output) if loss is not None else output
1319
+
1320
+ return TokenClassifierOutput(
1321
+ loss=loss,
1322
+ logits=logits,
1323
+ hidden_states=discriminator_hidden_states.hidden_states,
1324
+ attentions=discriminator_hidden_states.attentions,
1325
+ )
1326
+
1327
+
1328
+ @add_start_docstrings(
1329
+ """
1330
+ ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1331
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1332
+ """,
1333
+ ELECTRA_START_DOCSTRING,
1334
+ )
1335
+ class ElectraForQuestionAnswering(ElectraPreTrainedModel):
1336
+ config_class = ElectraConfig
1337
+ base_model_prefix = "electra"
1338
+
1339
+ def __init__(self, config):
1340
+ super().__init__(config)
1341
+ self.num_labels = config.num_labels
1342
+
1343
+ self.electra = ElectraModel(config)
1344
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1345
+
1346
+ # Initialize weights and apply final processing
1347
+ self.post_init()
1348
+
1349
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1350
+ @add_code_sample_docstrings(
1351
+ checkpoint="bhadresh-savani/electra-base-squad2",
1352
+ output_type=QuestionAnsweringModelOutput,
1353
+ config_class=_CONFIG_FOR_DOC,
1354
+ qa_target_start_index=11,
1355
+ qa_target_end_index=12,
1356
+ expected_output="'a nice puppet'",
1357
+ expected_loss=2.64,
1358
+ )
1359
+ def forward(
1360
+ self,
1361
+ input_ids: Optional[torch.Tensor] = None,
1362
+ attention_mask: Optional[torch.Tensor] = None,
1363
+ token_type_ids: Optional[torch.Tensor] = None,
1364
+ position_ids: Optional[torch.Tensor] = None,
1365
+ head_mask: Optional[torch.Tensor] = None,
1366
+ inputs_embeds: Optional[torch.Tensor] = None,
1367
+ start_positions: Optional[torch.Tensor] = None,
1368
+ end_positions: Optional[torch.Tensor] = None,
1369
+ output_attentions: Optional[bool] = None,
1370
+ output_hidden_states: Optional[bool] = None,
1371
+ return_dict: Optional[bool] = None,
1372
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1373
+ r"""
1374
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1379
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1380
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1381
+ are not taken into account for computing the loss.
1382
+ """
1383
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1384
+
1385
+ discriminator_hidden_states = self.electra(
1386
+ input_ids,
1387
+ attention_mask=attention_mask,
1388
+ token_type_ids=token_type_ids,
1389
+ position_ids=position_ids,
1390
+ head_mask=head_mask,
1391
+ inputs_embeds=inputs_embeds,
1392
+ output_attentions=output_attentions,
1393
+ output_hidden_states=output_hidden_states,
1394
+ )
1395
+
1396
+ sequence_output = discriminator_hidden_states[0]
1397
+
1398
+ logits = self.qa_outputs(sequence_output)
1399
+ start_logits, end_logits = logits.split(1, dim=-1)
1400
+ start_logits = start_logits.squeeze(-1).contiguous()
1401
+ end_logits = end_logits.squeeze(-1).contiguous()
1402
+
1403
+ total_loss = None
1404
+ if start_positions is not None and end_positions is not None:
1405
+ # If we are on multi-GPU, split adds an extra dimension; squeeze it
1406
+ if len(start_positions.size()) > 1:
1407
+ start_positions = start_positions.squeeze(-1)
1408
+ if len(end_positions.size()) > 1:
1409
+ end_positions = end_positions.squeeze(-1)
1410
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1411
+ ignored_index = start_logits.size(1)
1412
+ start_positions = start_positions.clamp(0, ignored_index)
1413
+ end_positions = end_positions.clamp(0, ignored_index)
1414
+
1415
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1416
+ start_loss = loss_fct(start_logits, start_positions)
1417
+ end_loss = loss_fct(end_logits, end_positions)
1418
+ total_loss = (start_loss + end_loss) / 2
1419
+
1420
+ if not return_dict:
1421
+ output = (
1422
+ start_logits,
1423
+ end_logits,
1424
+ ) + discriminator_hidden_states[1:]
1425
+ return ((total_loss,) + output) if total_loss is not None else output
1426
+
1427
+ return QuestionAnsweringModelOutput(
1428
+ loss=total_loss,
1429
+ start_logits=start_logits,
1430
+ end_logits=end_logits,
1431
+ hidden_states=discriminator_hidden_states.hidden_states,
1432
+ attentions=discriminator_hidden_states.attentions,
1433
+ )
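For orientation, here is a minimal, hedged usage sketch of the question-answering head defined above. It assumes the `bhadresh-savani/electra-base-squad2` checkpoint referenced in the doc sample; the question and context strings are illustrative only.

```python
import torch
from transformers import AutoTokenizer, ElectraForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/electra-base-squad2")
model = ElectraForQuestionAnswering.from_pretrained("bhadresh-savani/electra-base-squad2")

question, context = "Who was Jim Henson?", "Jim Henson was a nice puppet"
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Take the highest-scoring start/end token positions and decode the span between them.
start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```

The decoded span should correspond to the `expected_output` ("a nice puppet") wired into the `add_code_sample_docstrings` call above.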
1434
+
1435
+
1436
+ @add_start_docstrings(
1437
+ """
1438
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1439
+ softmax) e.g. for RocStories/SWAG tasks.
1440
+ """,
1441
+ ELECTRA_START_DOCSTRING,
1442
+ )
1443
+ class ElectraForMultipleChoice(ElectraPreTrainedModel):
1444
+ def __init__(self, config):
1445
+ super().__init__(config)
1446
+
1447
+ self.electra = ElectraModel(config)
1448
+ self.sequence_summary = SequenceSummary(config)
1449
+ self.classifier = nn.Linear(config.hidden_size, 1)
1450
+
1451
+ # Initialize weights and apply final processing
1452
+ self.post_init()
1453
+
1454
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1455
+ @add_code_sample_docstrings(
1456
+ checkpoint=_CHECKPOINT_FOR_DOC,
1457
+ output_type=MultipleChoiceModelOutput,
1458
+ config_class=_CONFIG_FOR_DOC,
1459
+ )
1460
+ def forward(
1461
+ self,
1462
+ input_ids: Optional[torch.Tensor] = None,
1463
+ attention_mask: Optional[torch.Tensor] = None,
1464
+ token_type_ids: Optional[torch.Tensor] = None,
1465
+ position_ids: Optional[torch.Tensor] = None,
1466
+ head_mask: Optional[torch.Tensor] = None,
1467
+ inputs_embeds: Optional[torch.Tensor] = None,
1468
+ labels: Optional[torch.Tensor] = None,
1469
+ output_attentions: Optional[bool] = None,
1470
+ output_hidden_states: Optional[bool] = None,
1471
+ return_dict: Optional[bool] = None,
1472
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1473
+ r"""
1474
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1475
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1476
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1477
+ `input_ids` above)
1478
+ """
1479
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1480
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1481
+
1482
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1483
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1484
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1485
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1486
+ inputs_embeds = (
1487
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1488
+ if inputs_embeds is not None
1489
+ else None
1490
+ )
1491
+
1492
+ discriminator_hidden_states = self.electra(
1493
+ input_ids,
1494
+ attention_mask=attention_mask,
1495
+ token_type_ids=token_type_ids,
1496
+ position_ids=position_ids,
1497
+ head_mask=head_mask,
1498
+ inputs_embeds=inputs_embeds,
1499
+ output_attentions=output_attentions,
1500
+ output_hidden_states=output_hidden_states,
1501
+ return_dict=return_dict,
1502
+ )
1503
+
1504
+ sequence_output = discriminator_hidden_states[0]
1505
+
1506
+ pooled_output = self.sequence_summary(sequence_output)
1507
+ logits = self.classifier(pooled_output)
1508
+ reshaped_logits = logits.view(-1, num_choices)
1509
+
1510
+ loss = None
1511
+ if labels is not None:
1512
+ loss_fct = CrossEntropyLoss()
1513
+ loss = loss_fct(reshaped_logits, labels)
1514
+
1515
+ if not return_dict:
1516
+ output = (reshaped_logits,) + discriminator_hidden_states[1:]
1517
+ return ((loss,) + output) if loss is not None else output
1518
+
1519
+ return MultipleChoiceModelOutput(
1520
+ loss=loss,
1521
+ logits=reshaped_logits,
1522
+ hidden_states=discriminator_hidden_states.hidden_states,
1523
+ attentions=discriminator_hidden_states.attentions,
1524
+ )
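As a sketch of how this head is typically fed, each (prompt, choice) pair is tokenized separately and the results are stacked into a `(batch_size, num_choices, sequence_length)` tensor, which the forward pass then flattens as shown above. The checkpoint below is the library's doc checkpoint for ELECTRA; the prompt and choices are invented for illustration.

```python
import torch
from transformers import AutoTokenizer, ElectraForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")

prompt = "The weather turned cold, so she"
choices = ["put on a coat.", "went for a swim."]

# One encoded sequence per choice, then add the leading num_choices dimension.
encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # (1, num_choices, seq_len)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_choices)
predicted_choice = logits.argmax(-1).item()
```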
1525
+
1526
+
1527
+ @add_start_docstrings(
1528
+ """ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.""", ELECTRA_START_DOCSTRING
1529
+ )
1530
+ class ElectraForCausalLM(ElectraPreTrainedModel):
1531
+ _tied_weights_keys = ["generator_lm_head.weight"]
1532
+
1533
+ def __init__(self, config):
1534
+ super().__init__(config)
1535
+
1536
+ if not config.is_decoder:
1537
+ logger.warning("If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True`.")
1538
+
1539
+ self.electra = ElectraModel(config)
1540
+ self.generator_predictions = ElectraGeneratorPredictions(config)
1541
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
1542
+
1543
+ self.init_weights()
1544
+
1545
+ def get_output_embeddings(self):
1546
+ return self.generator_lm_head
1547
+
1548
+ def set_output_embeddings(self, new_embeddings):
1549
+ self.generator_lm_head = new_embeddings
1550
+
1551
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1552
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1553
+ def forward(
1554
+ self,
1555
+ input_ids: Optional[torch.Tensor] = None,
1556
+ attention_mask: Optional[torch.Tensor] = None,
1557
+ token_type_ids: Optional[torch.Tensor] = None,
1558
+ position_ids: Optional[torch.Tensor] = None,
1559
+ head_mask: Optional[torch.Tensor] = None,
1560
+ inputs_embeds: Optional[torch.Tensor] = None,
1561
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1562
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1563
+ labels: Optional[torch.Tensor] = None,
1564
+ past_key_values: Optional[List[torch.Tensor]] = None,
1565
+ use_cache: Optional[bool] = None,
1566
+ output_attentions: Optional[bool] = None,
1567
+ output_hidden_states: Optional[bool] = None,
1568
+ return_dict: Optional[bool] = None,
1569
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1570
+ r"""
1571
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1572
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1573
+ the model is configured as a decoder.
1574
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1575
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1576
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1577
+
1578
+ - 1 for tokens that are **not masked**,
1579
+ - 0 for tokens that are **masked**.
1580
+
1581
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1582
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1583
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
1584
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1585
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1586
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1587
+
1588
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1589
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1590
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1591
+ use_cache (`bool`, *optional*):
1592
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1593
+ `past_key_values`).
1594
+
1595
+ Returns:
1596
+
1597
+ Example:
1598
+
1599
+ ```python
1600
+ >>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
1601
+ >>> import torch
1602
+
1603
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
1604
+ >>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
1605
+ >>> config.is_decoder = True
1606
+ >>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)
1607
+
1608
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1609
+ >>> outputs = model(**inputs)
1610
+
1611
+ >>> prediction_logits = outputs.logits
1612
+ ```"""
1613
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1614
+ if labels is not None:
1615
+ use_cache = False
1616
+
1617
+ outputs = self.electra(
1618
+ input_ids,
1619
+ attention_mask=attention_mask,
1620
+ token_type_ids=token_type_ids,
1621
+ position_ids=position_ids,
1622
+ head_mask=head_mask,
1623
+ inputs_embeds=inputs_embeds,
1624
+ encoder_hidden_states=encoder_hidden_states,
1625
+ encoder_attention_mask=encoder_attention_mask,
1626
+ past_key_values=past_key_values,
1627
+ use_cache=use_cache,
1628
+ output_attentions=output_attentions,
1629
+ output_hidden_states=output_hidden_states,
1630
+ return_dict=return_dict,
1631
+ )
1632
+
1633
+ sequence_output = outputs[0]
1634
+ prediction_scores = self.generator_lm_head(self.generator_predictions(sequence_output))
1635
+
1636
+ lm_loss = None
1637
+ if labels is not None:
1638
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1639
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1640
+ labels = labels[:, 1:].contiguous()
1641
+ loss_fct = CrossEntropyLoss()
1642
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1643
+
1644
+ if not return_dict:
1645
+ output = (prediction_scores,) + outputs[1:]
1646
+ return ((lm_loss,) + output) if lm_loss is not None else output
1647
+
1648
+ return CausalLMOutputWithCrossAttentions(
1649
+ loss=lm_loss,
1650
+ logits=prediction_scores,
1651
+ past_key_values=outputs.past_key_values,
1652
+ hidden_states=outputs.hidden_states,
1653
+ attentions=outputs.attentions,
1654
+ cross_attentions=outputs.cross_attentions,
1655
+ )
1656
+
1657
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.prepare_inputs_for_generation
1658
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
1659
+ input_shape = input_ids.shape
1660
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1661
+ if attention_mask is None:
1662
+ attention_mask = input_ids.new_ones(input_shape)
1663
+
1664
+ # cut decoder_input_ids if past_key_values is used
1665
+ if past_key_values is not None:
1666
+ past_length = past_key_values[0][0].shape[2]
1667
+
1668
+ # Some generation methods already pass only the last input ID
1669
+ if input_ids.shape[1] > past_length:
1670
+ remove_prefix_length = past_length
1671
+ else:
1672
+ # Default to old behavior: keep only final ID
1673
+ remove_prefix_length = input_ids.shape[1] - 1
1674
+
1675
+ input_ids = input_ids[:, remove_prefix_length:]
1676
+
1677
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1678
+
1679
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM._reorder_cache
1680
+ def _reorder_cache(self, past_key_values, beam_idx):
1681
+ reordered_past = ()
1682
+ for layer_past in past_key_values:
1683
+ reordered_past += (
1684
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1685
+ )
1686
+ return reordered_past
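To close out the PyTorch file, a hedged sketch of how `prepare_inputs_for_generation` and `_reorder_cache` are exercised in practice: `generate()` calls the former at every decoding step and the latter when beam search reorders hypotheses. The generator checkpoint matches the docstring example above; since ELECTRA generators are not trained for left-to-right generation, the continuation is only a mechanical illustration.

```python
import torch
from transformers import AutoTokenizer, ElectraConfig, ElectraForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
config = ElectraConfig.from_pretrained("google/electra-base-generator")
config.is_decoder = True  # required so the model applies a causal attention mask
model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)

input_ids = tokenizer("Hello, my dog is", return_tensors="pt").input_ids

# Beam search with caching: only the last token is fed once past_key_values exist,
# and the cache is reordered via _reorder_cache between beam steps.
with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=5, num_beams=2)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```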
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_flax_electra.py ADDED
@@ -0,0 +1,1601 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Callable, Optional, Tuple
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen import partitioning as nn_partitioning
26
+ from flax.linen.attention import dot_product_attention_weights
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import (
31
+ FlaxBaseModelOutput,
32
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
33
+ FlaxCausalLMOutputWithCrossAttentions,
34
+ FlaxMaskedLMOutput,
35
+ FlaxMultipleChoiceModelOutput,
36
+ FlaxQuestionAnsweringModelOutput,
37
+ FlaxSequenceClassifierOutput,
38
+ FlaxTokenClassifierOutput,
39
+ )
40
+ from ...modeling_flax_utils import (
41
+ ACT2FN,
42
+ FlaxPreTrainedModel,
43
+ append_call_sample_docstring,
44
+ append_replace_return_docstrings,
45
+ overwrite_call_docstring,
46
+ )
47
+ from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
48
+ from .configuration_electra import ElectraConfig
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
54
+ _CONFIG_FOR_DOC = "ElectraConfig"
55
+
56
+ remat = nn_partitioning.remat
57
+
58
+
59
+ @flax.struct.dataclass
60
+ class FlaxElectraForPreTrainingOutput(ModelOutput):
61
+ """
62
+ Output type of [`ElectraForPreTraining`].
63
+
64
+ Args:
65
+ logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
66
+ Prediction scores of the discriminator head (scores for each token before the sigmoid).
67
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
68
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
69
+ `(batch_size, sequence_length, hidden_size)`.
70
+
71
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
72
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
73
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
74
+ sequence_length)`.
75
+
76
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
77
+ heads.
78
+ """
79
+
80
+ logits: jnp.ndarray = None
81
+ hidden_states: Optional[Tuple[jnp.ndarray]] = None
82
+ attentions: Optional[Tuple[jnp.ndarray]] = None
83
+
84
+
85
+ ELECTRA_START_DOCSTRING = r"""
86
+
87
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
88
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
89
+
90
+ This model is also a Flax Linen
91
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
92
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
93
+
94
+ Finally, this model supports inherent JAX features such as:
95
+
96
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
97
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
98
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
99
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
100
+
101
+ Parameters:
102
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
103
+ Initializing with a config file does not load the weights associated with the model, only the
104
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
105
+ """
106
+
107
+ ELECTRA_INPUTS_DOCSTRING = r"""
108
+ Args:
109
+ input_ids (`numpy.ndarray` of shape `({0})`):
110
+ Indices of input sequence tokens in the vocabulary.
111
+
112
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
113
+ [`PreTrainedTokenizer.__call__`] for details.
114
+
115
+ [What are input IDs?](../glossary#input-ids)
116
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
117
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
118
+
119
+ - 1 for tokens that are **not masked**,
120
+ - 0 for tokens that are **masked**.
121
+
122
+ [What are attention masks?](../glossary#attention-mask)
123
+ token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
124
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
125
+ 1]`:
126
+
127
+ - 0 corresponds to a *sentence A* token,
128
+ - 1 corresponds to a *sentence B* token.
129
+
130
+ [What are token type IDs?](../glossary#token-type-ids)
131
+ position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
132
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
133
+ config.max_position_embeddings - 1]`.
134
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
135
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
136
+
137
+ - 1 indicates the head is **not masked**,
138
+ - 0 indicates the head is **masked**.
139
+
140
+ return_dict (`bool`, *optional*):
141
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
142
+
143
+ """
144
+
145
+
146
+ class FlaxElectraEmbeddings(nn.Module):
147
+ """Construct the embeddings from word, position and token_type embeddings."""
148
+
149
+ config: ElectraConfig
150
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
151
+
152
+ def setup(self):
153
+ self.word_embeddings = nn.Embed(
154
+ self.config.vocab_size,
155
+ self.config.embedding_size,
156
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
157
+ )
158
+ self.position_embeddings = nn.Embed(
159
+ self.config.max_position_embeddings,
160
+ self.config.embedding_size,
161
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
162
+ )
163
+ self.token_type_embeddings = nn.Embed(
164
+ self.config.type_vocab_size,
165
+ self.config.embedding_size,
166
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
167
+ )
168
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
169
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
170
+
171
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings.__call__
172
+ def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
173
+ # Embed
174
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
175
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
176
+ token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
177
+
178
+ # Sum all embeddings
179
+ hidden_states = inputs_embeds + token_type_embeddings + position_embeds
180
+
181
+ # Layer Norm
182
+ hidden_states = self.LayerNorm(hidden_states)
183
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
184
+ return hidden_states
185
+
186
+
187
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->Electra
188
+ class FlaxElectraSelfAttention(nn.Module):
189
+ config: ElectraConfig
190
+ causal: bool = False
191
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
192
+
193
+ def setup(self):
194
+ self.head_dim = self.config.hidden_size // self.config.num_attention_heads
195
+ if self.config.hidden_size % self.config.num_attention_heads != 0:
196
+ raise ValueError(
197
+ "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
198
+ " : {self.config.num_attention_heads}"
199
+ )
200
+
201
+ self.query = nn.Dense(
202
+ self.config.hidden_size,
203
+ dtype=self.dtype,
204
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
205
+ )
206
+ self.key = nn.Dense(
207
+ self.config.hidden_size,
208
+ dtype=self.dtype,
209
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
210
+ )
211
+ self.value = nn.Dense(
212
+ self.config.hidden_size,
213
+ dtype=self.dtype,
214
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
215
+ )
216
+
217
+ if self.causal:
218
+ self.causal_mask = make_causal_mask(
219
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
220
+ )
221
+
222
+ def _split_heads(self, hidden_states):
223
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
224
+
225
+ def _merge_heads(self, hidden_states):
226
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
227
+
228
+ @nn.compact
229
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
230
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
231
+ """
232
+ This function takes projected key, value states from a single input token and concatenates the states to cached
233
+ states from previous steps. This function is slighly adapted from the official Flax repository:
234
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
235
+ """
236
+ # detect if we're initializing by absence of existing cache data.
237
+ is_initialized = self.has_variable("cache", "cached_key")
238
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
239
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
240
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
241
+
242
+ if is_initialized:
243
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
244
+ # update key, value caches with our new 1d spatial slices
245
+ cur_index = cache_index.value
246
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
247
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
248
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
249
+ cached_key.value = key
250
+ cached_value.value = value
251
+ num_updated_cache_vectors = query.shape[1]
252
+ cache_index.value = cache_index.value + num_updated_cache_vectors
253
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
254
+ pad_mask = jnp.broadcast_to(
255
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
256
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
257
+ )
258
+ attention_mask = combine_masks(pad_mask, attention_mask)
259
+ return key, value, attention_mask
260
+
261
+ def __call__(
262
+ self,
263
+ hidden_states,
264
+ attention_mask,
265
+ layer_head_mask,
266
+ key_value_states: Optional[jnp.ndarray] = None,
267
+ init_cache: bool = False,
268
+ deterministic=True,
269
+ output_attentions: bool = False,
270
+ ):
271
+ # if key_value_states are provided this layer is used as a cross-attention layer
272
+ # for the decoder
273
+ is_cross_attention = key_value_states is not None
274
+ batch_size = hidden_states.shape[0]
275
+
276
+ # get query proj
277
+ query_states = self.query(hidden_states)
278
+ # get key, value proj
279
+ if is_cross_attention:
280
+ # cross_attentions
281
+ key_states = self.key(key_value_states)
282
+ value_states = self.value(key_value_states)
283
+ else:
284
+ # self_attention
285
+ key_states = self.key(hidden_states)
286
+ value_states = self.value(hidden_states)
287
+
288
+ query_states = self._split_heads(query_states)
289
+ key_states = self._split_heads(key_states)
290
+ value_states = self._split_heads(value_states)
291
+
292
+ # handle cache prepare causal attention mask
293
+ if self.causal:
294
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
295
+ if self.has_variable("cache", "cached_key"):
296
+ mask_shift = self.variables["cache"]["cache_index"]
297
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
298
+ causal_mask = lax.dynamic_slice(
299
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
300
+ )
301
+ else:
302
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
303
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
304
+
305
+ # combine masks if needed
306
+ if attention_mask is not None and self.causal:
307
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
308
+ attention_mask = combine_masks(attention_mask, causal_mask)
309
+ elif self.causal:
310
+ attention_mask = causal_mask
311
+ elif attention_mask is not None:
312
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
313
+
314
+ # During fast autoregressive decoding, we feed one position at a time,
315
+ # and cache the keys and values step by step.
316
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
317
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
318
+ key_states, value_states, query_states, attention_mask
319
+ )
320
+
321
+ # Convert the boolean attention mask to an attention bias.
322
+ if attention_mask is not None:
323
+ # attention mask in the form of attention bias
324
+ attention_bias = lax.select(
325
+ attention_mask > 0,
326
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
327
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
328
+ )
329
+ else:
330
+ attention_bias = None
331
+
332
+ dropout_rng = None
333
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
334
+ dropout_rng = self.make_rng("dropout")
335
+
336
+ attn_weights = dot_product_attention_weights(
337
+ query_states,
338
+ key_states,
339
+ bias=attention_bias,
340
+ dropout_rng=dropout_rng,
341
+ dropout_rate=self.config.attention_probs_dropout_prob,
342
+ broadcast_dropout=True,
343
+ deterministic=deterministic,
344
+ dtype=self.dtype,
345
+ precision=None,
346
+ )
347
+
348
+ # Mask heads if we want to
349
+ if layer_head_mask is not None:
350
+ attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
351
+
352
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
353
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
354
+
355
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
356
+ return outputs
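A standalone illustration of the mask-to-bias conversion used in `FlaxElectraSelfAttention.__call__` above: positions where the boolean mask is 0 receive a large negative bias, so they contribute nothing after the softmax. The mask values below are made up.

```python
import jax.numpy as jnp
from jax import lax

attention_mask = jnp.array([[1, 1, 1, 0]])  # 1 = attend, 0 = padding

# Same pattern as in the attention module: 0.0 for visible positions, dtype-min for masked ones.
attention_bias = lax.select(
    attention_mask > 0,
    jnp.full(attention_mask.shape, 0.0),
    jnp.full(attention_mask.shape, jnp.finfo(jnp.float32).min),
)
print(attention_bias)  # [[ 0.  0.  0.  -3.4028235e+38]]
```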
357
+
358
+
359
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->Electra
360
+ class FlaxElectraSelfOutput(nn.Module):
361
+ config: ElectraConfig
362
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
363
+
364
+ def setup(self):
365
+ self.dense = nn.Dense(
366
+ self.config.hidden_size,
367
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
368
+ dtype=self.dtype,
369
+ )
370
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
371
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
372
+
373
+ def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
374
+ hidden_states = self.dense(hidden_states)
375
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
376
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
377
+ return hidden_states
378
+
379
+
380
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Electra
381
+ class FlaxElectraAttention(nn.Module):
382
+ config: ElectraConfig
383
+ causal: bool = False
384
+ dtype: jnp.dtype = jnp.float32
385
+
386
+ def setup(self):
387
+ self.self = FlaxElectraSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
388
+ self.output = FlaxElectraSelfOutput(self.config, dtype=self.dtype)
389
+
390
+ def __call__(
391
+ self,
392
+ hidden_states,
393
+ attention_mask,
394
+ layer_head_mask,
395
+ key_value_states=None,
396
+ init_cache=False,
397
+ deterministic=True,
398
+ output_attentions: bool = False,
399
+ ):
400
+ # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
401
+ # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
402
+ # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
403
+ attn_outputs = self.self(
404
+ hidden_states,
405
+ attention_mask,
406
+ layer_head_mask=layer_head_mask,
407
+ key_value_states=key_value_states,
408
+ init_cache=init_cache,
409
+ deterministic=deterministic,
410
+ output_attentions=output_attentions,
411
+ )
412
+ attn_output = attn_outputs[0]
413
+ hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
414
+
415
+ outputs = (hidden_states,)
416
+
417
+ if output_attentions:
418
+ outputs += (attn_outputs[1],)
419
+
420
+ return outputs
421
+
422
+
423
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Electra
424
+ class FlaxElectraIntermediate(nn.Module):
425
+ config: ElectraConfig
426
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
427
+
428
+ def setup(self):
429
+ self.dense = nn.Dense(
430
+ self.config.intermediate_size,
431
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
432
+ dtype=self.dtype,
433
+ )
434
+ self.activation = ACT2FN[self.config.hidden_act]
435
+
436
+ def __call__(self, hidden_states):
437
+ hidden_states = self.dense(hidden_states)
438
+ hidden_states = self.activation(hidden_states)
439
+ return hidden_states
440
+
441
+
442
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Electra
443
+ class FlaxElectraOutput(nn.Module):
444
+ config: ElectraConfig
445
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
446
+
447
+ def setup(self):
448
+ self.dense = nn.Dense(
449
+ self.config.hidden_size,
450
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
451
+ dtype=self.dtype,
452
+ )
453
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
454
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
455
+
456
+ def __call__(self, hidden_states, attention_output, deterministic: bool = True):
457
+ hidden_states = self.dense(hidden_states)
458
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
459
+ hidden_states = self.LayerNorm(hidden_states + attention_output)
460
+ return hidden_states
461
+
462
+
463
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->Electra
464
+ class FlaxElectraLayer(nn.Module):
465
+ config: ElectraConfig
466
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
467
+
468
+ def setup(self):
469
+ self.attention = FlaxElectraAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
470
+ self.intermediate = FlaxElectraIntermediate(self.config, dtype=self.dtype)
471
+ self.output = FlaxElectraOutput(self.config, dtype=self.dtype)
472
+ if self.config.add_cross_attention:
473
+ self.crossattention = FlaxElectraAttention(self.config, causal=False, dtype=self.dtype)
474
+
475
+ def __call__(
476
+ self,
477
+ hidden_states,
478
+ attention_mask,
479
+ layer_head_mask,
480
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
481
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
482
+ init_cache: bool = False,
483
+ deterministic: bool = True,
484
+ output_attentions: bool = False,
485
+ ):
486
+ # Self Attention
487
+ attention_outputs = self.attention(
488
+ hidden_states,
489
+ attention_mask,
490
+ layer_head_mask=layer_head_mask,
491
+ init_cache=init_cache,
492
+ deterministic=deterministic,
493
+ output_attentions=output_attentions,
494
+ )
495
+ attention_output = attention_outputs[0]
496
+
497
+ # Cross-Attention Block
498
+ if encoder_hidden_states is not None:
499
+ cross_attention_outputs = self.crossattention(
500
+ attention_output,
501
+ attention_mask=encoder_attention_mask,
502
+ layer_head_mask=layer_head_mask,
503
+ key_value_states=encoder_hidden_states,
504
+ deterministic=deterministic,
505
+ output_attentions=output_attentions,
506
+ )
507
+ attention_output = cross_attention_outputs[0]
508
+
509
+ hidden_states = self.intermediate(attention_output)
510
+ hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
511
+
512
+ outputs = (hidden_states,)
513
+
514
+ if output_attentions:
515
+ outputs += (attention_outputs[1],)
516
+ if encoder_hidden_states is not None:
517
+ outputs += (cross_attention_outputs[1],)
518
+ return outputs
519
+
520
+
521
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Electra
522
+ class FlaxElectraLayerCollection(nn.Module):
523
+ config: ElectraConfig
524
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
525
+ gradient_checkpointing: bool = False
526
+
527
+ def setup(self):
528
+ if self.gradient_checkpointing:
529
+ FlaxElectraCheckpointLayer = remat(FlaxElectraLayer, static_argnums=(5, 6, 7))
530
+ self.layers = [
531
+ FlaxElectraCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
532
+ for i in range(self.config.num_hidden_layers)
533
+ ]
534
+ else:
535
+ self.layers = [
536
+ FlaxElectraLayer(self.config, name=str(i), dtype=self.dtype)
537
+ for i in range(self.config.num_hidden_layers)
538
+ ]
539
+
540
+ def __call__(
541
+ self,
542
+ hidden_states,
543
+ attention_mask,
544
+ head_mask,
545
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
546
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
547
+ init_cache: bool = False,
548
+ deterministic: bool = True,
549
+ output_attentions: bool = False,
550
+ output_hidden_states: bool = False,
551
+ return_dict: bool = True,
552
+ ):
553
+ all_attentions = () if output_attentions else None
554
+ all_hidden_states = () if output_hidden_states else None
555
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
556
+
557
+ # Check if head_mask has a correct number of layers specified if desired
558
+ if head_mask is not None:
559
+ if head_mask.shape[0] != (len(self.layers)):
560
+ raise ValueError(
561
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
562
+ f" {head_mask.shape[0]}."
563
+ )
564
+
565
+ for i, layer in enumerate(self.layers):
566
+ if output_hidden_states:
567
+ all_hidden_states += (hidden_states,)
568
+
569
+ layer_outputs = layer(
570
+ hidden_states,
571
+ attention_mask,
572
+ head_mask[i] if head_mask is not None else None,
573
+ encoder_hidden_states,
574
+ encoder_attention_mask,
575
+ init_cache,
576
+ deterministic,
577
+ output_attentions,
578
+ )
579
+
580
+ hidden_states = layer_outputs[0]
581
+
582
+ if output_attentions:
583
+ all_attentions += (layer_outputs[1],)
584
+
585
+ if encoder_hidden_states is not None:
586
+ all_cross_attentions += (layer_outputs[2],)
587
+
588
+ if output_hidden_states:
589
+ all_hidden_states += (hidden_states,)
590
+
591
+ outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
592
+
593
+ if not return_dict:
594
+ return tuple(v for v in outputs if v is not None)
595
+
596
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
597
+ last_hidden_state=hidden_states,
598
+ hidden_states=all_hidden_states,
599
+ attentions=all_attentions,
600
+ cross_attentions=all_cross_attentions,
601
+ )
602
+
603
+
604
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Electra
605
+ class FlaxElectraEncoder(nn.Module):
606
+ config: ElectraConfig
607
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
608
+ gradient_checkpointing: bool = False
609
+
610
+ def setup(self):
611
+ self.layer = FlaxElectraLayerCollection(
612
+ self.config,
613
+ dtype=self.dtype,
614
+ gradient_checkpointing=self.gradient_checkpointing,
615
+ )
616
+
617
+ def __call__(
618
+ self,
619
+ hidden_states,
620
+ attention_mask,
621
+ head_mask,
622
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
623
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
624
+ init_cache: bool = False,
625
+ deterministic: bool = True,
626
+ output_attentions: bool = False,
627
+ output_hidden_states: bool = False,
628
+ return_dict: bool = True,
629
+ ):
630
+ return self.layer(
631
+ hidden_states,
632
+ attention_mask,
633
+ head_mask=head_mask,
634
+ encoder_hidden_states=encoder_hidden_states,
635
+ encoder_attention_mask=encoder_attention_mask,
636
+ init_cache=init_cache,
637
+ deterministic=deterministic,
638
+ output_attentions=output_attentions,
639
+ output_hidden_states=output_hidden_states,
640
+ return_dict=return_dict,
641
+ )
642
+
643
+
644
+ class FlaxElectraGeneratorPredictions(nn.Module):
645
+ config: ElectraConfig
646
+ dtype: jnp.dtype = jnp.float32
647
+
648
+ def setup(self):
649
+ self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
650
+ self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
651
+
652
+ def __call__(self, hidden_states):
653
+ hidden_states = self.dense(hidden_states)
654
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
655
+ hidden_states = self.LayerNorm(hidden_states)
656
+ return hidden_states
657
+
658
+
659
+ class FlaxElectraDiscriminatorPredictions(nn.Module):
660
+ """Prediction module for the discriminator, made up of two dense layers."""
661
+
662
+ config: ElectraConfig
663
+ dtype: jnp.dtype = jnp.float32
664
+
665
+ def setup(self):
666
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
667
+ self.dense_prediction = nn.Dense(1, dtype=self.dtype)
668
+
669
+ def __call__(self, hidden_states):
670
+ hidden_states = self.dense(hidden_states)
671
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
672
+ hidden_states = self.dense_prediction(hidden_states).squeeze(-1)
673
+ return hidden_states
674
+
675
+
676
+ class FlaxElectraPreTrainedModel(FlaxPreTrainedModel):
677
+ """
678
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
679
+ models.
680
+ """
681
+
682
+ config_class = ElectraConfig
683
+ base_model_prefix = "electra"
684
+ module_class: nn.Module = None
685
+
686
+ def __init__(
687
+ self,
688
+ config: ElectraConfig,
689
+ input_shape: Tuple = (1, 1),
690
+ seed: int = 0,
691
+ dtype: jnp.dtype = jnp.float32,
692
+ _do_init: bool = True,
693
+ gradient_checkpointing: bool = False,
694
+ **kwargs,
695
+ ):
696
+ module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
697
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
698
+
699
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
700
+ def enable_gradient_checkpointing(self):
701
+ self._module = self.module_class(
702
+ config=self.config,
703
+ dtype=self.dtype,
704
+ gradient_checkpointing=True,
705
+ )
706
+
707
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_weights
708
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
709
+ # init input tensors
710
+ input_ids = jnp.zeros(input_shape, dtype="i4")
711
+ token_type_ids = jnp.zeros_like(input_ids)
712
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
713
+ attention_mask = jnp.ones_like(input_ids)
714
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
715
+
716
+ params_rng, dropout_rng = jax.random.split(rng)
717
+ rngs = {"params": params_rng, "dropout": dropout_rng}
718
+
719
+ if self.config.add_cross_attention:
720
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
721
+ encoder_attention_mask = attention_mask
722
+ module_init_outputs = self.module.init(
723
+ rngs,
724
+ input_ids,
725
+ attention_mask,
726
+ token_type_ids,
727
+ position_ids,
728
+ head_mask,
729
+ encoder_hidden_states,
730
+ encoder_attention_mask,
731
+ return_dict=False,
732
+ )
733
+ else:
734
+ module_init_outputs = self.module.init(
735
+ rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
736
+ )
737
+
738
+ random_params = module_init_outputs["params"]
739
+
740
+ if params is not None:
741
+ random_params = flatten_dict(unfreeze(random_params))
742
+ params = flatten_dict(unfreeze(params))
743
+ for missing_key in self._missing_keys:
744
+ params[missing_key] = random_params[missing_key]
745
+ self._missing_keys = set()
746
+ return freeze(unflatten_dict(params))
747
+ else:
748
+ return random_params
749
+
750
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
751
+ def init_cache(self, batch_size, max_length):
752
+ r"""
753
+ Args:
754
+ batch_size (`int`):
755
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
756
+ max_length (`int`):
757
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
758
+ cache.
759
+ """
760
+ # init input variables to retrieve cache
761
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
762
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
763
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
764
+
765
+ init_variables = self.module.init(
766
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
767
+ )
768
+ return unfreeze(init_variables["cache"])
769
+
770
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
771
+ def __call__(
772
+ self,
773
+ input_ids,
774
+ attention_mask=None,
775
+ token_type_ids=None,
776
+ position_ids=None,
777
+ head_mask=None,
778
+ encoder_hidden_states=None,
779
+ encoder_attention_mask=None,
780
+ params: dict = None,
781
+ dropout_rng: jax.random.PRNGKey = None,
782
+ train: bool = False,
783
+ output_attentions: Optional[bool] = None,
784
+ output_hidden_states: Optional[bool] = None,
785
+ return_dict: Optional[bool] = None,
786
+ past_key_values: dict = None,
787
+ ):
788
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
789
+ output_hidden_states = (
790
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
791
+ )
792
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
793
+
794
+ # init input tensors if not passed
795
+ if token_type_ids is None:
796
+ token_type_ids = jnp.ones_like(input_ids)
797
+
798
+ if position_ids is None:
799
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
800
+
801
+ if attention_mask is None:
802
+ attention_mask = jnp.ones_like(input_ids)
803
+
804
+ if head_mask is None:
805
+ head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
806
+
807
+ # Handle any PRNG if needed
808
+ rngs = {}
809
+ if dropout_rng is not None:
810
+ rngs["dropout"] = dropout_rng
811
+
812
+ inputs = {"params": params or self.params}
813
+
814
+ if self.config.add_cross_attention:
815
+ # If past_key_values are passed, the cache is already initialized; a private flag init_cache has to be
816
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
817
+ # changed by the FlaxElectraAttention module.
818
+ if past_key_values:
819
+ inputs["cache"] = past_key_values
820
+ mutable = ["cache"]
821
+ else:
822
+ mutable = False
823
+
824
+ outputs = self.module.apply(
825
+ inputs,
826
+ jnp.array(input_ids, dtype="i4"),
827
+ jnp.array(attention_mask, dtype="i4"),
828
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
829
+ position_ids=jnp.array(position_ids, dtype="i4"),
830
+ head_mask=jnp.array(head_mask, dtype="i4"),
831
+ encoder_hidden_states=encoder_hidden_states,
832
+ encoder_attention_mask=encoder_attention_mask,
833
+ deterministic=not train,
834
+ output_attentions=output_attentions,
835
+ output_hidden_states=output_hidden_states,
836
+ return_dict=return_dict,
837
+ rngs=rngs,
838
+ mutable=mutable,
839
+ )
840
+
841
+ # add updated cache to model output
842
+ if past_key_values is not None and return_dict:
843
+ outputs, past_key_values = outputs
844
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
845
+ return outputs
846
+ elif past_key_values is not None and not return_dict:
847
+ outputs, past_key_values = outputs
848
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
849
+
850
+ else:
851
+ outputs = self.module.apply(
852
+ inputs,
853
+ jnp.array(input_ids, dtype="i4"),
854
+ jnp.array(attention_mask, dtype="i4"),
855
+ token_type_ids=jnp.array(token_type_ids, dtype="i4"),
856
+ position_ids=jnp.array(position_ids, dtype="i4"),
857
+ head_mask=jnp.array(head_mask, dtype="i4"),
858
+ deterministic=not train,
859
+ output_attentions=output_attentions,
860
+ output_hidden_states=output_hidden_states,
861
+ return_dict=return_dict,
862
+ rngs=rngs,
863
+ )
864
+
865
+ return outputs
866
+
867
+
868
+ class FlaxElectraModule(nn.Module):
869
+ config: ElectraConfig
870
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
871
+ gradient_checkpointing: bool = False
872
+
873
+ def setup(self):
874
+ self.embeddings = FlaxElectraEmbeddings(self.config, dtype=self.dtype)
875
+ if self.config.embedding_size != self.config.hidden_size:
876
+ self.embeddings_project = nn.Dense(self.config.hidden_size, dtype=self.dtype)
877
+ self.encoder = FlaxElectraEncoder(
878
+ self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
879
+ )
880
+
881
+ def __call__(
882
+ self,
883
+ input_ids,
884
+ attention_mask,
885
+ token_type_ids,
886
+ position_ids,
887
+ head_mask: Optional[np.ndarray] = None,
888
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
889
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
890
+ init_cache: bool = False,
891
+ deterministic: bool = True,
892
+ output_attentions: bool = False,
893
+ output_hidden_states: bool = False,
894
+ return_dict: bool = True,
895
+ ):
896
+ embeddings = self.embeddings(
897
+ input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
898
+ )
899
+ if hasattr(self, "embeddings_project"):
900
+ embeddings = self.embeddings_project(embeddings)
901
+
902
+ return self.encoder(
903
+ embeddings,
904
+ attention_mask,
905
+ head_mask=head_mask,
906
+ deterministic=deterministic,
907
+ encoder_hidden_states=encoder_hidden_states,
908
+ encoder_attention_mask=encoder_attention_mask,
909
+ init_cache=init_cache,
910
+ output_attentions=output_attentions,
911
+ output_hidden_states=output_hidden_states,
912
+ return_dict=return_dict,
913
+ )
914
+
915
+
916
+ @add_start_docstrings(
917
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top.",
918
+ ELECTRA_START_DOCSTRING,
919
+ )
920
+ class FlaxElectraModel(FlaxElectraPreTrainedModel):
921
+ module_class = FlaxElectraModule
922
+
923
+
924
+ append_call_sample_docstring(FlaxElectraModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
925
+
926
+
927
+ class FlaxElectraTiedDense(nn.Module):
928
+ embedding_size: int
929
+ dtype: jnp.dtype = jnp.float32
930
+ precision = None
931
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
932
+
933
+ def setup(self):
934
+ self.bias = self.param("bias", self.bias_init, (self.embedding_size,))
935
+
936
+ def __call__(self, x, kernel):
937
+ x = jnp.asarray(x, self.dtype)
938
+ kernel = jnp.asarray(kernel, self.dtype)
939
+ y = lax.dot_general(
940
+ x,
941
+ kernel,
942
+ (((x.ndim - 1,), (0,)), ((), ())),
943
+ precision=self.precision,
944
+ )
945
+ bias = jnp.asarray(self.bias, self.dtype)
946
+ return y + bias
947
+
948
+
949
+ class FlaxElectraForMaskedLMModule(nn.Module):
950
+ config: ElectraConfig
951
+ dtype: jnp.dtype = jnp.float32
952
+ gradient_checkpointing: bool = False
953
+
954
+ def setup(self):
955
+ self.electra = FlaxElectraModule(
956
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
957
+ )
958
+ self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
959
+ if self.config.tie_word_embeddings:
960
+ self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
961
+ else:
962
+ self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
963
+
964
+ def __call__(
965
+ self,
966
+ input_ids,
967
+ attention_mask=None,
968
+ token_type_ids=None,
969
+ position_ids=None,
970
+ head_mask=None,
971
+ deterministic: bool = True,
972
+ output_attentions: bool = False,
973
+ output_hidden_states: bool = False,
974
+ return_dict: bool = True,
975
+ ):
976
+ outputs = self.electra(
977
+ input_ids,
978
+ attention_mask,
979
+ token_type_ids,
980
+ position_ids,
981
+ head_mask,
982
+ deterministic=deterministic,
983
+ output_attentions=output_attentions,
984
+ output_hidden_states=output_hidden_states,
985
+ return_dict=return_dict,
986
+ )
987
+ hidden_states = outputs[0]
988
+ prediction_scores = self.generator_predictions(hidden_states)
989
+
990
+ if self.config.tie_word_embeddings:
991
+ shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
992
+ prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
993
+ else:
994
+ prediction_scores = self.generator_lm_head(prediction_scores)
995
+
996
+ if not return_dict:
997
+ return (prediction_scores,) + outputs[1:]
998
+
999
+ return FlaxMaskedLMOutput(
1000
+ logits=prediction_scores,
1001
+ hidden_states=outputs.hidden_states,
1002
+ attentions=outputs.attentions,
1003
+ )
1004
+
1005
+
1006
+ @add_start_docstrings("""Electra Model with a `language modeling` head on top.""", ELECTRA_START_DOCSTRING)
1007
+ class FlaxElectraForMaskedLM(FlaxElectraPreTrainedModel):
1008
+ module_class = FlaxElectraForMaskedLMModule
1009
+
1010
+
1011
+ append_call_sample_docstring(FlaxElectraForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
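For completeness, a minimal sketch of calling the masked-LM head with NumPy-encoded inputs. It assumes the `google/electra-base-generator` checkpoint used in the causal-LM example earlier, and that Flax weights are available for it (otherwise `from_pretrained(..., from_pt=True)` would be needed); the sentence is arbitrary.

```python
import numpy as np
from transformers import AutoTokenizer, FlaxElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
model = FlaxElectraForMaskedLM.from_pretrained("google/electra-base-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits  # (batch_size, sequence_length, vocab_size)

# Read out the most likely token at the [MASK] position.
mask_position = int(np.argmax(inputs["input_ids"][0] == tokenizer.mask_token_id))
predicted_id = int(logits[0, mask_position].argmax(-1))
print(tokenizer.decode([predicted_id]))
```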
1012
+
1013
+
1014
+ class FlaxElectraForPreTrainingModule(nn.Module):
1015
+ config: ElectraConfig
1016
+ dtype: jnp.dtype = jnp.float32
1017
+ gradient_checkpointing: bool = False
1018
+
1019
+ def setup(self):
1020
+ self.electra = FlaxElectraModule(
1021
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1022
+ )
1023
+ self.discriminator_predictions = FlaxElectraDiscriminatorPredictions(config=self.config, dtype=self.dtype)
1024
+
1025
+ def __call__(
1026
+ self,
1027
+ input_ids,
1028
+ attention_mask=None,
1029
+ token_type_ids=None,
1030
+ position_ids=None,
1031
+ head_mask=None,
1032
+ deterministic: bool = True,
1033
+ output_attentions: bool = False,
1034
+ output_hidden_states: bool = False,
1035
+ return_dict: bool = True,
1036
+ ):
1037
+ # Model
1038
+ outputs = self.electra(
1039
+ input_ids,
1040
+ attention_mask,
1041
+ token_type_ids,
1042
+ position_ids,
1043
+ head_mask,
1044
+ deterministic=deterministic,
1045
+ output_attentions=output_attentions,
1046
+ output_hidden_states=output_hidden_states,
1047
+ return_dict=return_dict,
1048
+ )
1049
+ hidden_states = outputs[0]
1050
+
1051
+ logits = self.discriminator_predictions(hidden_states)
1052
+
1053
+ if not return_dict:
1054
+ return (logits,) + outputs[1:]
1055
+
1056
+ return FlaxElectraForPreTrainingOutput(
1057
+ logits=logits,
1058
+ hidden_states=outputs.hidden_states,
1059
+ attentions=outputs.attentions,
1060
+ )
1061
+
1062
+
1063
+ @add_start_docstrings(
1064
+ """
1065
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1066
+
1067
+ It is recommended to load the discriminator checkpoint into that model.
1068
+ """,
1069
+ ELECTRA_START_DOCSTRING,
1070
+ )
1071
+ class FlaxElectraForPreTraining(FlaxElectraPreTrainedModel):
1072
+ module_class = FlaxElectraForPreTrainingModule
1073
+
1074
+
1075
+ FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING = """
1076
+ Returns:
1077
+
1078
+ Example:
1079
+
1080
+ ```python
1081
+ >>> from transformers import AutoTokenizer, FlaxElectraForPreTraining
1082
+
1083
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
1084
+ >>> model = FlaxElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
1085
+
1086
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
1087
+ >>> outputs = model(**inputs)
1088
+
1089
+ >>> prediction_logits = outputs.logits
1090
+ ```
1091
+ """
1092
+
1093
+ overwrite_call_docstring(
1094
+ FlaxElectraForPreTraining,
1095
+ ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING,
1096
+ )
1097
+ append_replace_return_docstrings(
1098
+ FlaxElectraForPreTraining, output_type=FlaxElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
1099
+ )
1100
+
1101
+
1102
+ class FlaxElectraForTokenClassificationModule(nn.Module):
1103
+ config: ElectraConfig
1104
+ dtype: jnp.dtype = jnp.float32
1105
+ gradient_checkpointing: bool = False
1106
+
1107
+ def setup(self):
1108
+ self.electra = FlaxElectraModule(
1109
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1110
+ )
1111
+ classifier_dropout = (
1112
+ self.config.classifier_dropout
1113
+ if self.config.classifier_dropout is not None
1114
+ else self.config.hidden_dropout_prob
1115
+ )
1116
+ self.dropout = nn.Dropout(classifier_dropout)
1117
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
1118
+
1119
+ def __call__(
1120
+ self,
1121
+ input_ids,
1122
+ attention_mask=None,
1123
+ token_type_ids=None,
1124
+ position_ids=None,
1125
+ head_mask=None,
1126
+ deterministic: bool = True,
1127
+ output_attentions: bool = False,
1128
+ output_hidden_states: bool = False,
1129
+ return_dict: bool = True,
1130
+ ):
1131
+ # Model
1132
+ outputs = self.electra(
1133
+ input_ids,
1134
+ attention_mask,
1135
+ token_type_ids,
1136
+ position_ids,
1137
+ head_mask,
1138
+ deterministic=deterministic,
1139
+ output_attentions=output_attentions,
1140
+ output_hidden_states=output_hidden_states,
1141
+ return_dict=return_dict,
1142
+ )
1143
+ hidden_states = outputs[0]
1144
+
1145
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
1146
+ logits = self.classifier(hidden_states)
1147
+
1148
+ if not return_dict:
1149
+ return (logits,) + outputs[1:]
1150
+
1151
+ return FlaxTokenClassifierOutput(
1152
+ logits=logits,
1153
+ hidden_states=outputs.hidden_states,
1154
+ attentions=outputs.attentions,
1155
+ )
1156
+
1157
+
1158
+ @add_start_docstrings(
1159
+ """
1160
+ Electra model with a token classification head on top.
1161
+
1162
+ Both the discriminator and generator may be loaded into this model.
1163
+ """,
1164
+ ELECTRA_START_DOCSTRING,
1165
+ )
1166
+ class FlaxElectraForTokenClassification(FlaxElectraPreTrainedModel):
1167
+ module_class = FlaxElectraForTokenClassificationModule
1168
+
1169
+
1170
+ append_call_sample_docstring(
1171
+ FlaxElectraForTokenClassification,
1172
+ _CHECKPOINT_FOR_DOC,
1173
+ FlaxTokenClassifierOutput,
1174
+ _CONFIG_FOR_DOC,
1175
+ )
1176
+
1177
+
1178
+ def identity(x, **kwargs):
1179
+ return x
1180
+
1181
+
1182
+ class FlaxElectraSequenceSummary(nn.Module):
1183
+ r"""
1184
+ Compute a single vector summary of a sequence hidden states.
1185
+
1186
+ Args:
1187
+ config ([`PretrainedConfig`]):
1188
+ The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
1189
+ config class of your model for the default values it uses):
1190
+
1191
+ - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
1192
+ - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
1193
+ (otherwise to `config.hidden_size`).
1194
 +            - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output;
1195
 +              any other string or `None` adds no activation.
1196
+ - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
1197
 +            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
1198
+ """
1199
+
1200
+ config: ElectraConfig
1201
+ dtype: jnp.dtype = jnp.float32
1202
+
1203
+ def setup(self):
1204
+ self.summary = identity
1205
+ if hasattr(self.config, "summary_use_proj") and self.config.summary_use_proj:
1206
+ if (
1207
+ hasattr(self.config, "summary_proj_to_labels")
1208
+ and self.config.summary_proj_to_labels
1209
+ and self.config.num_labels > 0
1210
+ ):
1211
+ num_classes = self.config.num_labels
1212
+ else:
1213
+ num_classes = self.config.hidden_size
1214
+ self.summary = nn.Dense(num_classes, dtype=self.dtype)
1215
+
1216
+ activation_string = getattr(self.config, "summary_activation", None)
1217
+ self.activation = ACT2FN[activation_string] if activation_string else lambda x: x # noqa F407
1218
+
1219
+ self.first_dropout = identity
1220
+ if hasattr(self.config, "summary_first_dropout") and self.config.summary_first_dropout > 0:
1221
+ self.first_dropout = nn.Dropout(self.config.summary_first_dropout)
1222
+
1223
+ self.last_dropout = identity
1224
+ if hasattr(self.config, "summary_last_dropout") and self.config.summary_last_dropout > 0:
1225
+ self.last_dropout = nn.Dropout(self.config.summary_last_dropout)
1226
+
1227
+ def __call__(self, hidden_states, cls_index=None, deterministic: bool = True):
1228
+ """
1229
+ Compute a single vector summary of a sequence hidden states.
1230
+
1231
+ Args:
1232
+ hidden_states (`jnp.ndarray` of shape `[batch_size, seq_len, hidden_size]`):
1233
+ The hidden states of the last layer.
1234
+ cls_index (`jnp.ndarray` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
1235
+ Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
1236
+
1237
+ Returns:
1238
+ `jnp.ndarray`: The summary of the sequence hidden states.
1239
+ """
1240
 +        # NOTE: this always does the "first" type of summary
1241
+ output = hidden_states[:, 0]
1242
+ output = self.first_dropout(output, deterministic=deterministic)
1243
+ output = self.summary(output)
1244
+ output = self.activation(output)
1245
+ output = self.last_dropout(output, deterministic=deterministic)
1246
+ return output
1247
+
1248
+
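For reference, `FlaxElectraSequenceSummary` always applies the "first token" strategy: it collapses the sequence axis by taking the hidden state at position 0 before the optional projection, activation, and dropout. A shape-only sketch, with made-up sizes:

```python
import jax.numpy as jnp

batch_size, seq_len, hidden_size = 2, 8, 16  # illustrative sizes
hidden_states = jnp.zeros((batch_size, seq_len, hidden_size))

# The summary keeps only the first-token vector of every sequence.
summary = hidden_states[:, 0]  # shape: (2, 16)
```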
1249
+ class FlaxElectraForMultipleChoiceModule(nn.Module):
1250
+ config: ElectraConfig
1251
+ dtype: jnp.dtype = jnp.float32
1252
+ gradient_checkpointing: bool = False
1253
+
1254
+ def setup(self):
1255
+ self.electra = FlaxElectraModule(
1256
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1257
+ )
1258
+ self.sequence_summary = FlaxElectraSequenceSummary(config=self.config, dtype=self.dtype)
1259
+ self.classifier = nn.Dense(1, dtype=self.dtype)
1260
+
1261
+ def __call__(
1262
+ self,
1263
+ input_ids,
1264
+ attention_mask=None,
1265
+ token_type_ids=None,
1266
+ position_ids=None,
1267
+ head_mask=None,
1268
+ deterministic: bool = True,
1269
+ output_attentions: bool = False,
1270
+ output_hidden_states: bool = False,
1271
+ return_dict: bool = True,
1272
+ ):
1273
+ num_choices = input_ids.shape[1]
1274
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
1275
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
1276
+ token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
1277
+ position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
1278
+
1279
+ # Model
1280
+ outputs = self.electra(
1281
+ input_ids,
1282
+ attention_mask,
1283
+ token_type_ids,
1284
+ position_ids,
1285
+ head_mask,
1286
+ deterministic=deterministic,
1287
+ output_attentions=output_attentions,
1288
+ output_hidden_states=output_hidden_states,
1289
+ return_dict=return_dict,
1290
+ )
1291
+ hidden_states = outputs[0]
1292
+ pooled_output = self.sequence_summary(hidden_states, deterministic=deterministic)
1293
+ logits = self.classifier(pooled_output)
1294
+
1295
+ reshaped_logits = logits.reshape(-1, num_choices)
1296
+
1297
+ if not return_dict:
1298
+ return (reshaped_logits,) + outputs[1:]
1299
+
1300
+ return FlaxMultipleChoiceModelOutput(
1301
+ logits=reshaped_logits,
1302
+ hidden_states=outputs.hidden_states,
1303
+ attentions=outputs.attentions,
1304
+ )
1305
+
1306
+
1307
+ @add_start_docstrings(
1308
+ """
1309
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1310
+ softmax) e.g. for RocStories/SWAG tasks.
1311
+ """,
1312
+ ELECTRA_START_DOCSTRING,
1313
+ )
1314
+ class FlaxElectraForMultipleChoice(FlaxElectraPreTrainedModel):
1315
+ module_class = FlaxElectraForMultipleChoiceModule
1316
+
1317
+
1318
+ # adapt docstring slightly for FlaxElectraForMultipleChoice
1319
+ overwrite_call_docstring(
1320
+ FlaxElectraForMultipleChoice, ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1321
+ )
1322
+ append_call_sample_docstring(
1323
+ FlaxElectraForMultipleChoice,
1324
+ _CHECKPOINT_FOR_DOC,
1325
+ FlaxMultipleChoiceModelOutput,
1326
+ _CONFIG_FOR_DOC,
1327
+ )
1328
+
1329
+
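The multiple-choice module above works by folding the choice dimension into the batch before calling the backbone and unfolding it again on the logits. A small shape sketch with made-up sizes:

```python
import jax.numpy as jnp

batch_size, num_choices, seq_len = 2, 4, 8  # illustrative sizes
input_ids = jnp.zeros((batch_size, num_choices, seq_len), dtype=jnp.int32)

# Fold choices into the batch so the encoder sees plain (batch, seq) inputs.
flat_input_ids = input_ids.reshape(-1, input_ids.shape[-1])  # (8, 8)

# One score per (example, choice) pair, then regrouped per example.
flat_logits = jnp.zeros((flat_input_ids.shape[0], 1))
reshaped_logits = flat_logits.reshape(-1, num_choices)       # (2, 4)
```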
1330
+ class FlaxElectraForQuestionAnsweringModule(nn.Module):
1331
+ config: ElectraConfig
1332
+ dtype: jnp.dtype = jnp.float32
1333
+ gradient_checkpointing: bool = False
1334
+
1335
+ def setup(self):
1336
+ self.electra = FlaxElectraModule(
1337
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1338
+ )
1339
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
1340
+
1341
+ def __call__(
1342
+ self,
1343
+ input_ids,
1344
+ attention_mask=None,
1345
+ token_type_ids=None,
1346
+ position_ids=None,
1347
+ head_mask=None,
1348
+ deterministic: bool = True,
1349
+ output_attentions: bool = False,
1350
+ output_hidden_states: bool = False,
1351
+ return_dict: bool = True,
1352
+ ):
1353
+ # Model
1354
+ outputs = self.electra(
1355
+ input_ids,
1356
+ attention_mask,
1357
+ token_type_ids,
1358
+ position_ids,
1359
+ head_mask,
1360
+ deterministic=deterministic,
1361
+ output_attentions=output_attentions,
1362
+ output_hidden_states=output_hidden_states,
1363
+ return_dict=return_dict,
1364
+ )
1365
+ hidden_states = outputs[0]
1366
+ logits = self.qa_outputs(hidden_states)
1367
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
1368
+ start_logits = start_logits.squeeze(-1)
1369
+ end_logits = end_logits.squeeze(-1)
1370
+
1371
+ if not return_dict:
1372
+ return (start_logits, end_logits) + outputs[1:]
1373
+
1374
+ return FlaxQuestionAnsweringModelOutput(
1375
+ start_logits=start_logits,
1376
+ end_logits=end_logits,
1377
+ hidden_states=outputs.hidden_states,
1378
+ attentions=outputs.attentions,
1379
+ )
1380
+
1381
+
1382
+ @add_start_docstrings(
1383
+ """
1384
 +    ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1385
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1386
+ """,
1387
+ ELECTRA_START_DOCSTRING,
1388
+ )
1389
+ class FlaxElectraForQuestionAnswering(FlaxElectraPreTrainedModel):
1390
+ module_class = FlaxElectraForQuestionAnsweringModule
1391
+
1392
+
1393
+ append_call_sample_docstring(
1394
+ FlaxElectraForQuestionAnswering,
1395
+ _CHECKPOINT_FOR_DOC,
1396
+ FlaxQuestionAnsweringModelOutput,
1397
+ _CONFIG_FOR_DOC,
1398
+ )
1399
+
1400
+
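In the question-answering module, `qa_outputs` emits two scores per token which are then separated into start and end logits. A minimal sketch of that split, with illustrative sizes:

```python
import jax.numpy as jnp

batch_size, seq_len = 2, 8  # illustrative sizes
logits = jnp.zeros((batch_size, seq_len, 2))  # num_labels == 2 for span extraction

start_logits, end_logits = jnp.split(logits, 2, axis=-1)
start_logits = start_logits.squeeze(-1)  # (2, 8): one start score per token
end_logits = end_logits.squeeze(-1)      # (2, 8): one end score per token
```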
1401
+ class FlaxElectraClassificationHead(nn.Module):
1402
+ """Head for sentence-level classification tasks."""
1403
+
1404
+ config: ElectraConfig
1405
+ dtype: jnp.dtype = jnp.float32
1406
+
1407
+ def setup(self):
1408
+ self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
1409
+ classifier_dropout = (
1410
+ self.config.classifier_dropout
1411
+ if self.config.classifier_dropout is not None
1412
+ else self.config.hidden_dropout_prob
1413
+ )
1414
+ self.dropout = nn.Dropout(classifier_dropout)
1415
+ self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype)
1416
+
1417
+ def __call__(self, hidden_states, deterministic: bool = True):
1418
+ x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
1419
+ x = self.dropout(x, deterministic=deterministic)
1420
+ x = self.dense(x)
1421
+ x = ACT2FN["gelu"](x) # although BERT uses tanh here, it seems Electra authors used gelu
1422
+ x = self.dropout(x, deterministic=deterministic)
1423
+ x = self.out_proj(x)
1424
+ return x
1425
+
1426
+
1427
+ class FlaxElectraForSequenceClassificationModule(nn.Module):
1428
+ config: ElectraConfig
1429
+ dtype: jnp.dtype = jnp.float32
1430
+ gradient_checkpointing: bool = False
1431
+
1432
+ def setup(self):
1433
+ self.electra = FlaxElectraModule(
1434
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1435
+ )
1436
+ self.classifier = FlaxElectraClassificationHead(config=self.config, dtype=self.dtype)
1437
+
1438
+ def __call__(
1439
+ self,
1440
+ input_ids,
1441
+ attention_mask=None,
1442
+ token_type_ids=None,
1443
+ position_ids=None,
1444
+ head_mask=None,
1445
+ deterministic: bool = True,
1446
+ output_attentions: bool = False,
1447
+ output_hidden_states: bool = False,
1448
+ return_dict: bool = True,
1449
+ ):
1450
+ # Model
1451
+ outputs = self.electra(
1452
+ input_ids,
1453
+ attention_mask,
1454
+ token_type_ids,
1455
+ position_ids,
1456
+ head_mask,
1457
+ deterministic=deterministic,
1458
+ output_attentions=output_attentions,
1459
+ output_hidden_states=output_hidden_states,
1460
+ return_dict=return_dict,
1461
+ )
1462
+ hidden_states = outputs[0]
1463
+ logits = self.classifier(hidden_states, deterministic=deterministic)
1464
+
1465
+ if not return_dict:
1466
+ return (logits,) + outputs[1:]
1467
+
1468
+ return FlaxSequenceClassifierOutput(
1469
+ logits=logits,
1470
+ hidden_states=outputs.hidden_states,
1471
+ attentions=outputs.attentions,
1472
+ )
1473
+
1474
+
1475
+ @add_start_docstrings(
1476
+ """
1477
+ Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1478
+ pooled output) e.g. for GLUE tasks.
1479
+ """,
1480
+ ELECTRA_START_DOCSTRING,
1481
+ )
1482
+ class FlaxElectraForSequenceClassification(FlaxElectraPreTrainedModel):
1483
+ module_class = FlaxElectraForSequenceClassificationModule
1484
+
1485
+
1486
+ append_call_sample_docstring(
1487
+ FlaxElectraForSequenceClassification,
1488
+ _CHECKPOINT_FOR_DOC,
1489
+ FlaxSequenceClassifierOutput,
1490
+ _CONFIG_FOR_DOC,
1491
+ )
1492
+
1493
+
1494
+ class FlaxElectraForCausalLMModule(nn.Module):
1495
+ config: ElectraConfig
1496
+ dtype: jnp.dtype = jnp.float32
1497
+ gradient_checkpointing: bool = False
1498
+
1499
+ def setup(self):
1500
+ self.electra = FlaxElectraModule(
1501
+ config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
1502
+ )
1503
+ self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
1504
+ if self.config.tie_word_embeddings:
1505
+ self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
1506
+ else:
1507
+ self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
1508
+
1509
+ def __call__(
1510
+ self,
1511
+ input_ids,
1512
+ attention_mask: Optional[jnp.ndarray] = None,
1513
+ token_type_ids: Optional[jnp.ndarray] = None,
1514
+ position_ids: Optional[jnp.ndarray] = None,
1515
+ head_mask: Optional[jnp.ndarray] = None,
1516
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
1517
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
1518
+ init_cache: bool = False,
1519
+ deterministic: bool = True,
1520
+ output_attentions: bool = False,
1521
+ output_hidden_states: bool = False,
1522
+ return_dict: bool = True,
1523
+ ):
1524
+ outputs = self.electra(
1525
+ input_ids,
1526
+ attention_mask,
1527
+ token_type_ids,
1528
+ position_ids,
1529
+ head_mask,
1530
+ encoder_hidden_states=encoder_hidden_states,
1531
+ encoder_attention_mask=encoder_attention_mask,
1532
+ init_cache=init_cache,
1533
+ deterministic=deterministic,
1534
+ output_attentions=output_attentions,
1535
+ output_hidden_states=output_hidden_states,
1536
+ return_dict=return_dict,
1537
+ )
1538
+ hidden_states = outputs[0]
1539
+ prediction_scores = self.generator_predictions(hidden_states)
1540
+
1541
+ if self.config.tie_word_embeddings:
1542
+ shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
1543
+ prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
1544
+ else:
1545
+ prediction_scores = self.generator_lm_head(prediction_scores)
1546
+
1547
+ if not return_dict:
1548
+ return (prediction_scores,) + outputs[1:]
1549
+
1550
+ return FlaxCausalLMOutputWithCrossAttentions(
1551
+ logits=prediction_scores,
1552
+ hidden_states=outputs.hidden_states,
1553
+ attentions=outputs.attentions,
1554
+ cross_attentions=outputs.cross_attentions,
1555
+ )
1556
+
1557
+
1558
+ @add_start_docstrings(
1559
+ """
1560
 +    Electra Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g. for
1561
+ autoregressive tasks.
1562
+ """,
1563
+ ELECTRA_START_DOCSTRING,
1564
+ )
1565
+ # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForCausalLM with Bert->Electra
1566
+ class FlaxElectraForCausalLM(FlaxElectraPreTrainedModel):
1567
+ module_class = FlaxElectraForCausalLMModule
1568
+
1569
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
1570
+ # initializing the cache
1571
+ batch_size, seq_length = input_ids.shape
1572
+
1573
+ past_key_values = self.init_cache(batch_size, max_length)
1574
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1575
+ # But since the decoder uses a causal mask, those positions are masked anyway.
1576
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
1577
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1578
+ if attention_mask is not None:
1579
+ position_ids = attention_mask.cumsum(axis=-1) - 1
1580
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
1581
+ else:
1582
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
1583
+
1584
+ return {
1585
+ "past_key_values": past_key_values,
1586
+ "attention_mask": extended_attention_mask,
1587
+ "position_ids": position_ids,
1588
+ }
1589
+
1590
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
1591
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
1592
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
1593
+ return model_kwargs
1594
+
1595
+
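`prepare_inputs_for_generation` above builds a fixed-size attention mask and derives `position_ids` from the cumulative sum of the padding mask, so padding positions come out as -1 and real tokens count up from 0. A small sketch with made-up values:

```python
import jax.numpy as jnp
from jax import lax

attention_mask = jnp.array([[0, 1, 1, 1]], dtype="i4")  # illustrative left-padded batch
max_length = 6

position_ids = attention_mask.cumsum(axis=-1) - 1       # [[-1, 0, 1, 2]]

extended_attention_mask = jnp.ones((1, max_length), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
# -> [[0, 1, 1, 1, 1, 1]]; positions beyond the prompt stay 1, the causal mask hides them anyway.
```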
1596
+ append_call_sample_docstring(
1597
+ FlaxElectraForCausalLM,
1598
+ _CHECKPOINT_FOR_DOC,
1599
+ FlaxCausalLMOutputWithCrossAttentions,
1600
+ _CONFIG_FOR_DOC,
1601
+ )
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/modeling_tf_electra.py ADDED
@@ -0,0 +1,1775 @@
1
+ # coding=utf-8
2
+ # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF Electra model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutputWithPastAndCrossAttentions,
31
+ TFMaskedLMOutput,
32
+ TFMultipleChoiceModelOutput,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutput,
35
+ TFTokenClassifierOutput,
36
+ )
37
+ from ...modeling_tf_utils import (
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFSequenceSummary,
45
+ TFTokenClassificationLoss,
46
+ get_initializer,
47
+ keras,
48
+ keras_serializable,
49
+ unpack_inputs,
50
+ )
51
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
52
+ from ...utils import (
53
+ ModelOutput,
54
+ add_code_sample_docstrings,
55
+ add_start_docstrings,
56
+ add_start_docstrings_to_model_forward,
57
+ logging,
58
+ replace_return_docstrings,
59
+ )
60
+ from .configuration_electra import ElectraConfig
61
+
62
+
63
+ logger = logging.get_logger(__name__)
64
+
65
+ _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
66
+ _CONFIG_FOR_DOC = "ElectraConfig"
67
+
68
+ TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [
69
+ "google/electra-small-generator",
70
+ "google/electra-base-generator",
71
+ "google/electra-large-generator",
72
+ "google/electra-small-discriminator",
73
+ "google/electra-base-discriminator",
74
+ "google/electra-large-discriminator",
75
+ # See all ELECTRA models at https://huggingface.co/models?filter=electra
76
+ ]
77
+
78
+
79
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Electra
80
+ class TFElectraSelfAttention(keras.layers.Layer):
81
+ def __init__(self, config: ElectraConfig, **kwargs):
82
+ super().__init__(**kwargs)
83
+
84
+ if config.hidden_size % config.num_attention_heads != 0:
85
+ raise ValueError(
86
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
87
+ f"of attention heads ({config.num_attention_heads})"
88
+ )
89
+
90
+ self.num_attention_heads = config.num_attention_heads
91
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
92
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
93
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
94
+
95
+ self.query = keras.layers.Dense(
96
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
97
+ )
98
+ self.key = keras.layers.Dense(
99
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
100
+ )
101
+ self.value = keras.layers.Dense(
102
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
103
+ )
104
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
105
+
106
+ self.is_decoder = config.is_decoder
107
+ self.config = config
108
+
109
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
110
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
111
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
112
+
113
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
114
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
115
+
116
+ def call(
117
+ self,
118
+ hidden_states: tf.Tensor,
119
+ attention_mask: tf.Tensor,
120
+ head_mask: tf.Tensor,
121
+ encoder_hidden_states: tf.Tensor,
122
+ encoder_attention_mask: tf.Tensor,
123
+ past_key_value: Tuple[tf.Tensor],
124
+ output_attentions: bool,
125
+ training: bool = False,
126
+ ) -> Tuple[tf.Tensor]:
127
+ batch_size = shape_list(hidden_states)[0]
128
+ mixed_query_layer = self.query(inputs=hidden_states)
129
+
130
+ # If this is instantiated as a cross-attention module, the keys
131
+ # and values come from an encoder; the attention mask needs to be
132
+ # such that the encoder's padding tokens are not attended to.
133
+ is_cross_attention = encoder_hidden_states is not None
134
+
135
+ if is_cross_attention and past_key_value is not None:
136
+ # reuse k,v, cross_attentions
137
+ key_layer = past_key_value[0]
138
+ value_layer = past_key_value[1]
139
+ attention_mask = encoder_attention_mask
140
+ elif is_cross_attention:
141
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
142
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
143
+ attention_mask = encoder_attention_mask
144
+ elif past_key_value is not None:
145
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
146
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
147
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
148
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
149
+ else:
150
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
151
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
152
+
153
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
154
+
155
+ if self.is_decoder:
156
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
157
+ # Further calls to cross_attention layer can then reuse all cross-attention
158
+ # key/value_states (first "if" case)
159
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
160
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
161
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
162
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
163
+ past_key_value = (key_layer, value_layer)
164
+
165
+ # Take the dot product between "query" and "key" to get the raw attention scores.
166
+ # (batch size, num_heads, seq_len_q, seq_len_k)
167
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
168
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
169
+ attention_scores = tf.divide(attention_scores, dk)
170
+
171
+ if attention_mask is not None:
172
 +            # Apply the attention mask (precomputed for all layers in TFElectraModel call() function)
173
+ attention_scores = tf.add(attention_scores, attention_mask)
174
+
175
+ # Normalize the attention scores to probabilities.
176
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
177
+
178
+ # This is actually dropping out entire tokens to attend to, which might
179
+ # seem a bit unusual, but is taken from the original Transformer paper.
180
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
181
+
182
+ # Mask heads if we want to
183
+ if head_mask is not None:
184
+ attention_probs = tf.multiply(attention_probs, head_mask)
185
+
186
+ attention_output = tf.matmul(attention_probs, value_layer)
187
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
188
+
189
+ # (batch_size, seq_len_q, all_head_size)
190
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
191
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
192
+
193
+ if self.is_decoder:
194
+ outputs = outputs + (past_key_value,)
195
+ return outputs
196
+
197
+ def build(self, input_shape=None):
198
+ if self.built:
199
+ return
200
+ self.built = True
201
+ if getattr(self, "query", None) is not None:
202
+ with tf.name_scope(self.query.name):
203
+ self.query.build([None, None, self.config.hidden_size])
204
+ if getattr(self, "key", None) is not None:
205
+ with tf.name_scope(self.key.name):
206
+ self.key.build([None, None, self.config.hidden_size])
207
+ if getattr(self, "value", None) is not None:
208
+ with tf.name_scope(self.value.name):
209
+ self.value.build([None, None, self.config.hidden_size])
210
+
211
+
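`transpose_for_scores` above reshapes the projected tensor so that the attention heads become a leading, batch-like dimension, which lets the per-head matmuls run as one batched op. A shape-only sketch with made-up sizes:

```python
import tensorflow as tf

batch_size, seq_len, num_heads, head_size = 2, 8, 4, 16  # illustrative sizes
x = tf.zeros((batch_size, seq_len, num_heads * head_size))

x = tf.reshape(x, (batch_size, -1, num_heads, head_size))  # (2, 8, 4, 16)
x = tf.transpose(x, perm=[0, 2, 1, 3])                     # (2, 4, 8, 16)
```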
212
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Electra
213
+ class TFElectraSelfOutput(keras.layers.Layer):
214
+ def __init__(self, config: ElectraConfig, **kwargs):
215
+ super().__init__(**kwargs)
216
+
217
+ self.dense = keras.layers.Dense(
218
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
219
+ )
220
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
221
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
222
+ self.config = config
223
+
224
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
225
+ hidden_states = self.dense(inputs=hidden_states)
226
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
227
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
228
+
229
+ return hidden_states
230
+
231
+ def build(self, input_shape=None):
232
+ if self.built:
233
+ return
234
+ self.built = True
235
+ if getattr(self, "dense", None) is not None:
236
+ with tf.name_scope(self.dense.name):
237
+ self.dense.build([None, None, self.config.hidden_size])
238
+ if getattr(self, "LayerNorm", None) is not None:
239
+ with tf.name_scope(self.LayerNorm.name):
240
+ self.LayerNorm.build([None, None, self.config.hidden_size])
241
+
242
+
243
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Electra
244
+ class TFElectraAttention(keras.layers.Layer):
245
+ def __init__(self, config: ElectraConfig, **kwargs):
246
+ super().__init__(**kwargs)
247
+
248
+ self.self_attention = TFElectraSelfAttention(config, name="self")
249
+ self.dense_output = TFElectraSelfOutput(config, name="output")
250
+
251
+ def prune_heads(self, heads):
252
+ raise NotImplementedError
253
+
254
+ def call(
255
+ self,
256
+ input_tensor: tf.Tensor,
257
+ attention_mask: tf.Tensor,
258
+ head_mask: tf.Tensor,
259
+ encoder_hidden_states: tf.Tensor,
260
+ encoder_attention_mask: tf.Tensor,
261
+ past_key_value: Tuple[tf.Tensor],
262
+ output_attentions: bool,
263
+ training: bool = False,
264
+ ) -> Tuple[tf.Tensor]:
265
+ self_outputs = self.self_attention(
266
+ hidden_states=input_tensor,
267
+ attention_mask=attention_mask,
268
+ head_mask=head_mask,
269
+ encoder_hidden_states=encoder_hidden_states,
270
+ encoder_attention_mask=encoder_attention_mask,
271
+ past_key_value=past_key_value,
272
+ output_attentions=output_attentions,
273
+ training=training,
274
+ )
275
+ attention_output = self.dense_output(
276
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
277
+ )
278
+ # add attentions (possibly with past_key_value) if we output them
279
+ outputs = (attention_output,) + self_outputs[1:]
280
+
281
+ return outputs
282
+
283
+ def build(self, input_shape=None):
284
+ if self.built:
285
+ return
286
+ self.built = True
287
+ if getattr(self, "self_attention", None) is not None:
288
+ with tf.name_scope(self.self_attention.name):
289
+ self.self_attention.build(None)
290
+ if getattr(self, "dense_output", None) is not None:
291
+ with tf.name_scope(self.dense_output.name):
292
+ self.dense_output.build(None)
293
+
294
+
295
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Electra
296
+ class TFElectraIntermediate(keras.layers.Layer):
297
+ def __init__(self, config: ElectraConfig, **kwargs):
298
+ super().__init__(**kwargs)
299
+
300
+ self.dense = keras.layers.Dense(
301
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
302
+ )
303
+
304
+ if isinstance(config.hidden_act, str):
305
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
306
+ else:
307
+ self.intermediate_act_fn = config.hidden_act
308
+ self.config = config
309
+
310
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
311
+ hidden_states = self.dense(inputs=hidden_states)
312
+ hidden_states = self.intermediate_act_fn(hidden_states)
313
+
314
+ return hidden_states
315
+
316
+ def build(self, input_shape=None):
317
+ if self.built:
318
+ return
319
+ self.built = True
320
+ if getattr(self, "dense", None) is not None:
321
+ with tf.name_scope(self.dense.name):
322
+ self.dense.build([None, None, self.config.hidden_size])
323
+
324
+
325
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Electra
326
+ class TFElectraOutput(keras.layers.Layer):
327
+ def __init__(self, config: ElectraConfig, **kwargs):
328
+ super().__init__(**kwargs)
329
+
330
+ self.dense = keras.layers.Dense(
331
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
332
+ )
333
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
334
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
335
+ self.config = config
336
+
337
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
338
+ hidden_states = self.dense(inputs=hidden_states)
339
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
340
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
341
+
342
+ return hidden_states
343
+
344
+ def build(self, input_shape=None):
345
+ if self.built:
346
+ return
347
+ self.built = True
348
+ if getattr(self, "dense", None) is not None:
349
+ with tf.name_scope(self.dense.name):
350
+ self.dense.build([None, None, self.config.intermediate_size])
351
+ if getattr(self, "LayerNorm", None) is not None:
352
+ with tf.name_scope(self.LayerNorm.name):
353
+ self.LayerNorm.build([None, None, self.config.hidden_size])
354
+
355
+
356
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Electra
357
+ class TFElectraLayer(keras.layers.Layer):
358
+ def __init__(self, config: ElectraConfig, **kwargs):
359
+ super().__init__(**kwargs)
360
+
361
+ self.attention = TFElectraAttention(config, name="attention")
362
+ self.is_decoder = config.is_decoder
363
+ self.add_cross_attention = config.add_cross_attention
364
+ if self.add_cross_attention:
365
+ if not self.is_decoder:
366
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
367
+ self.crossattention = TFElectraAttention(config, name="crossattention")
368
+ self.intermediate = TFElectraIntermediate(config, name="intermediate")
369
+ self.bert_output = TFElectraOutput(config, name="output")
370
+
371
+ def call(
372
+ self,
373
+ hidden_states: tf.Tensor,
374
+ attention_mask: tf.Tensor,
375
+ head_mask: tf.Tensor,
376
+ encoder_hidden_states: tf.Tensor | None,
377
+ encoder_attention_mask: tf.Tensor | None,
378
+ past_key_value: Tuple[tf.Tensor] | None,
379
+ output_attentions: bool,
380
+ training: bool = False,
381
+ ) -> Tuple[tf.Tensor]:
382
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
383
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
384
+ self_attention_outputs = self.attention(
385
+ input_tensor=hidden_states,
386
+ attention_mask=attention_mask,
387
+ head_mask=head_mask,
388
+ encoder_hidden_states=None,
389
+ encoder_attention_mask=None,
390
+ past_key_value=self_attn_past_key_value,
391
+ output_attentions=output_attentions,
392
+ training=training,
393
+ )
394
+ attention_output = self_attention_outputs[0]
395
+
396
+ # if decoder, the last output is tuple of self-attn cache
397
+ if self.is_decoder:
398
+ outputs = self_attention_outputs[1:-1]
399
+ present_key_value = self_attention_outputs[-1]
400
+ else:
401
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
402
+
403
+ cross_attn_present_key_value = None
404
+ if self.is_decoder and encoder_hidden_states is not None:
405
+ if not hasattr(self, "crossattention"):
406
+ raise ValueError(
407
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
408
+ " by setting `config.add_cross_attention=True`"
409
+ )
410
+
411
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
412
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
413
+ cross_attention_outputs = self.crossattention(
414
+ input_tensor=attention_output,
415
+ attention_mask=attention_mask,
416
+ head_mask=head_mask,
417
+ encoder_hidden_states=encoder_hidden_states,
418
+ encoder_attention_mask=encoder_attention_mask,
419
+ past_key_value=cross_attn_past_key_value,
420
+ output_attentions=output_attentions,
421
+ training=training,
422
+ )
423
+ attention_output = cross_attention_outputs[0]
424
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
425
+
426
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
427
+ cross_attn_present_key_value = cross_attention_outputs[-1]
428
+ present_key_value = present_key_value + cross_attn_present_key_value
429
+
430
+ intermediate_output = self.intermediate(hidden_states=attention_output)
431
+ layer_output = self.bert_output(
432
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
433
+ )
434
+ outputs = (layer_output,) + outputs # add attentions if we output them
435
+
436
+ # if decoder, return the attn key/values as the last output
437
+ if self.is_decoder:
438
+ outputs = outputs + (present_key_value,)
439
+
440
+ return outputs
441
+
442
+ def build(self, input_shape=None):
443
+ if self.built:
444
+ return
445
+ self.built = True
446
+ if getattr(self, "attention", None) is not None:
447
+ with tf.name_scope(self.attention.name):
448
+ self.attention.build(None)
449
+ if getattr(self, "intermediate", None) is not None:
450
+ with tf.name_scope(self.intermediate.name):
451
+ self.intermediate.build(None)
452
+ if getattr(self, "bert_output", None) is not None:
453
+ with tf.name_scope(self.bert_output.name):
454
+ self.bert_output.build(None)
455
+ if getattr(self, "crossattention", None) is not None:
456
+ with tf.name_scope(self.crossattention.name):
457
+ self.crossattention.build(None)
458
+
459
+
460
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Electra
461
+ class TFElectraEncoder(keras.layers.Layer):
462
+ def __init__(self, config: ElectraConfig, **kwargs):
463
+ super().__init__(**kwargs)
464
+ self.config = config
465
+ self.layer = [TFElectraLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
466
+
467
+ def call(
468
+ self,
469
+ hidden_states: tf.Tensor,
470
+ attention_mask: tf.Tensor,
471
+ head_mask: tf.Tensor,
472
+ encoder_hidden_states: tf.Tensor | None,
473
+ encoder_attention_mask: tf.Tensor | None,
474
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
475
+ use_cache: Optional[bool],
476
+ output_attentions: bool,
477
+ output_hidden_states: bool,
478
+ return_dict: bool,
479
+ training: bool = False,
480
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
481
+ all_hidden_states = () if output_hidden_states else None
482
+ all_attentions = () if output_attentions else None
483
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
484
+
485
+ next_decoder_cache = () if use_cache else None
486
+ for i, layer_module in enumerate(self.layer):
487
+ if output_hidden_states:
488
+ all_hidden_states = all_hidden_states + (hidden_states,)
489
+
490
+ past_key_value = past_key_values[i] if past_key_values is not None else None
491
+
492
+ layer_outputs = layer_module(
493
+ hidden_states=hidden_states,
494
+ attention_mask=attention_mask,
495
+ head_mask=head_mask[i],
496
+ encoder_hidden_states=encoder_hidden_states,
497
+ encoder_attention_mask=encoder_attention_mask,
498
+ past_key_value=past_key_value,
499
+ output_attentions=output_attentions,
500
+ training=training,
501
+ )
502
+ hidden_states = layer_outputs[0]
503
+
504
+ if use_cache:
505
+ next_decoder_cache += (layer_outputs[-1],)
506
+
507
+ if output_attentions:
508
+ all_attentions = all_attentions + (layer_outputs[1],)
509
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
510
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
511
+
512
+ # Add last layer
513
+ if output_hidden_states:
514
+ all_hidden_states = all_hidden_states + (hidden_states,)
515
+
516
+ if not return_dict:
517
+ return tuple(
518
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
519
+ )
520
+
521
+ return TFBaseModelOutputWithPastAndCrossAttentions(
522
+ last_hidden_state=hidden_states,
523
+ past_key_values=next_decoder_cache,
524
+ hidden_states=all_hidden_states,
525
+ attentions=all_attentions,
526
+ cross_attentions=all_cross_attentions,
527
+ )
528
+
529
+ def build(self, input_shape=None):
530
+ if self.built:
531
+ return
532
+ self.built = True
533
+ if getattr(self, "layer", None) is not None:
534
+ for layer in self.layer:
535
+ with tf.name_scope(layer.name):
536
+ layer.build(None)
537
+
538
+
539
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Electra
540
+ class TFElectraPooler(keras.layers.Layer):
541
+ def __init__(self, config: ElectraConfig, **kwargs):
542
+ super().__init__(**kwargs)
543
+
544
+ self.dense = keras.layers.Dense(
545
+ units=config.hidden_size,
546
+ kernel_initializer=get_initializer(config.initializer_range),
547
+ activation="tanh",
548
+ name="dense",
549
+ )
550
+ self.config = config
551
+
552
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
553
+ # We "pool" the model by simply taking the hidden state corresponding
554
+ # to the first token.
555
+ first_token_tensor = hidden_states[:, 0]
556
+ pooled_output = self.dense(inputs=first_token_tensor)
557
+
558
+ return pooled_output
559
+
560
+ def build(self, input_shape=None):
561
+ if self.built:
562
+ return
563
+ self.built = True
564
+ if getattr(self, "dense", None) is not None:
565
+ with tf.name_scope(self.dense.name):
566
+ self.dense.build([None, None, self.config.hidden_size])
567
+
568
+
569
+ # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->Electra
570
+ class TFElectraEmbeddings(keras.layers.Layer):
571
+ """Construct the embeddings from word, position and token_type embeddings."""
572
+
573
+ def __init__(self, config: ElectraConfig, **kwargs):
574
+ super().__init__(**kwargs)
575
+
576
+ self.config = config
577
+ self.embedding_size = config.embedding_size
578
+ self.max_position_embeddings = config.max_position_embeddings
579
+ self.initializer_range = config.initializer_range
580
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
581
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
582
+
583
+ def build(self, input_shape=None):
584
+ with tf.name_scope("word_embeddings"):
585
+ self.weight = self.add_weight(
586
+ name="weight",
587
+ shape=[self.config.vocab_size, self.embedding_size],
588
+ initializer=get_initializer(self.initializer_range),
589
+ )
590
+
591
+ with tf.name_scope("token_type_embeddings"):
592
+ self.token_type_embeddings = self.add_weight(
593
+ name="embeddings",
594
+ shape=[self.config.type_vocab_size, self.embedding_size],
595
+ initializer=get_initializer(self.initializer_range),
596
+ )
597
+
598
+ with tf.name_scope("position_embeddings"):
599
+ self.position_embeddings = self.add_weight(
600
+ name="embeddings",
601
+ shape=[self.max_position_embeddings, self.embedding_size],
602
+ initializer=get_initializer(self.initializer_range),
603
+ )
604
+
605
+ if self.built:
606
+ return
607
+ self.built = True
608
+ if getattr(self, "LayerNorm", None) is not None:
609
+ with tf.name_scope(self.LayerNorm.name):
610
+ self.LayerNorm.build([None, None, self.config.embedding_size])
611
+
612
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
613
+ def call(
614
+ self,
615
+ input_ids: tf.Tensor = None,
616
+ position_ids: tf.Tensor = None,
617
+ token_type_ids: tf.Tensor = None,
618
+ inputs_embeds: tf.Tensor = None,
619
+ past_key_values_length=0,
620
+ training: bool = False,
621
+ ) -> tf.Tensor:
622
+ """
623
+ Applies embedding based on inputs tensor.
624
+
625
+ Returns:
626
+ final_embeddings (`tf.Tensor`): output embedding tensor.
627
+ """
628
+ if input_ids is None and inputs_embeds is None:
629
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
630
+
631
+ if input_ids is not None:
632
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
633
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
634
+
635
+ input_shape = shape_list(inputs_embeds)[:-1]
636
+
637
+ if token_type_ids is None:
638
+ token_type_ids = tf.fill(dims=input_shape, value=0)
639
+
640
+ if position_ids is None:
641
+ position_ids = tf.expand_dims(
642
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
643
+ )
644
+
645
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
646
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
647
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
648
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
649
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
650
+
651
+ return final_embeddings
652
+
653
+
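The embedding layer above sums three lookups of the same width (word, position, and token-type embeddings) before LayerNorm and dropout. A shape-only sketch with made-up sizes:

```python
import tensorflow as tf

batch_size, seq_len, embedding_size, vocab = 2, 8, 16, 100  # illustrative sizes
word = tf.random.normal((vocab, embedding_size))
pos = tf.random.normal((32, embedding_size))       # assuming max_position_embeddings = 32
tok_type = tf.random.normal((2, embedding_size))   # assuming type_vocab_size = 2

input_ids = tf.zeros((batch_size, seq_len), dtype=tf.int32)
position_ids = tf.range(seq_len)[None, :]
token_type_ids = tf.zeros((batch_size, seq_len), dtype=tf.int32)

embeddings = (
    tf.gather(word, input_ids)          # (2, 8, 16)
    + tf.gather(pos, position_ids)      # (1, 8, 16), broadcasts over the batch
    + tf.gather(tok_type, token_type_ids)
)  # (2, 8, 16); the real layer then applies LayerNorm and dropout
```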
654
+ class TFElectraDiscriminatorPredictions(keras.layers.Layer):
655
+ def __init__(self, config, **kwargs):
656
+ super().__init__(**kwargs)
657
+
658
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
659
+ self.dense_prediction = keras.layers.Dense(1, name="dense_prediction")
660
+ self.config = config
661
+
662
+ def call(self, discriminator_hidden_states, training=False):
663
+ hidden_states = self.dense(discriminator_hidden_states)
664
+ hidden_states = get_tf_activation(self.config.hidden_act)(hidden_states)
665
+ logits = tf.squeeze(self.dense_prediction(hidden_states), -1)
666
+
667
+ return logits
668
+
669
+ def build(self, input_shape=None):
670
+ if self.built:
671
+ return
672
+ self.built = True
673
+ if getattr(self, "dense", None) is not None:
674
+ with tf.name_scope(self.dense.name):
675
+ self.dense.build([None, None, self.config.hidden_size])
676
+ if getattr(self, "dense_prediction", None) is not None:
677
+ with tf.name_scope(self.dense_prediction.name):
678
+ self.dense_prediction.build([None, None, self.config.hidden_size])
679
+
680
+
681
+ class TFElectraGeneratorPredictions(keras.layers.Layer):
682
+ def __init__(self, config, **kwargs):
683
+ super().__init__(**kwargs)
684
+
685
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
686
+ self.dense = keras.layers.Dense(config.embedding_size, name="dense")
687
+ self.config = config
688
+
689
+ def call(self, generator_hidden_states, training=False):
690
+ hidden_states = self.dense(generator_hidden_states)
691
+ hidden_states = get_tf_activation("gelu")(hidden_states)
692
+ hidden_states = self.LayerNorm(hidden_states)
693
+
694
+ return hidden_states
695
+
696
+ def build(self, input_shape=None):
697
+ if self.built:
698
+ return
699
+ self.built = True
700
+ if getattr(self, "LayerNorm", None) is not None:
701
+ with tf.name_scope(self.LayerNorm.name):
702
+ self.LayerNorm.build([None, None, self.config.embedding_size])
703
+ if getattr(self, "dense", None) is not None:
704
+ with tf.name_scope(self.dense.name):
705
+ self.dense.build([None, None, self.config.hidden_size])
706
+
707
+
708
+ class TFElectraPreTrainedModel(TFPreTrainedModel):
709
+ """
710
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
711
+ models.
712
+ """
713
+
714
+ config_class = ElectraConfig
715
+ base_model_prefix = "electra"
716
+ # When the model is loaded from a PT model
717
+ _keys_to_ignore_on_load_unexpected = [r"generator_lm_head.weight"]
718
+ _keys_to_ignore_on_load_missing = [r"dropout"]
719
+
720
+
721
+ @keras_serializable
722
+ class TFElectraMainLayer(keras.layers.Layer):
723
+ config_class = ElectraConfig
724
+
725
+ def __init__(self, config, **kwargs):
726
+ super().__init__(**kwargs)
727
+
728
+ self.config = config
729
+ self.is_decoder = config.is_decoder
730
+
731
+ self.embeddings = TFElectraEmbeddings(config, name="embeddings")
732
+
733
+ if config.embedding_size != config.hidden_size:
734
+ self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project")
735
+
736
+ self.encoder = TFElectraEncoder(config, name="encoder")
737
+
738
+ def get_input_embeddings(self):
739
+ return self.embeddings
740
+
741
+ def set_input_embeddings(self, value):
742
+ self.embeddings.weight = value
743
+ self.embeddings.vocab_size = shape_list(value)[0]
744
+
745
+ def _prune_heads(self, heads_to_prune):
746
+ """
747
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
748
+ class PreTrainedModel
749
+ """
750
+ raise NotImplementedError
751
+
752
+ def get_extended_attention_mask(self, attention_mask, input_shape, dtype, past_key_values_length=0):
753
+ batch_size, seq_length = input_shape
754
+
755
+ if attention_mask is None:
756
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
757
+
758
+ # We create a 3D attention mask from a 2D tensor mask.
759
+ # Sizes are [batch_size, 1, 1, to_seq_length]
760
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
761
 +        # this attention mask is simpler than the triangular masking of causal attention
762
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
763
+ attention_mask_shape = shape_list(attention_mask)
764
+
765
+ mask_seq_length = seq_length + past_key_values_length
766
+ # Copied from `modeling_tf_t5.py`
767
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
768
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
769
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
770
+ if self.is_decoder:
771
+ seq_ids = tf.range(mask_seq_length)
772
+ causal_mask = tf.less_equal(
773
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
774
+ seq_ids[None, :, None],
775
+ )
776
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
777
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
778
+ attention_mask_shape = shape_list(extended_attention_mask)
779
+ extended_attention_mask = tf.reshape(
780
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
781
+ )
782
+ if past_key_values_length > 0:
783
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
784
+ else:
785
+ extended_attention_mask = tf.reshape(
786
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
787
+ )
788
+
789
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
790
+ # masked positions, this operation will create a tensor which is 0.0 for
791
+ # positions we want to attend and -10000.0 for masked positions.
792
+ # Since we are adding it to the raw scores before the softmax, this is
793
+ # effectively the same as removing these entirely.
794
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=dtype)
795
+ one_cst = tf.constant(1.0, dtype=dtype)
796
+ ten_thousand_cst = tf.constant(-10000.0, dtype=dtype)
797
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
798
+
799
+ return extended_attention_mask
800
+
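The extended attention mask built above is additive: after broadcasting to `[batch_size, 1, 1 (or seq), seq]`, kept positions map to 0.0 and masked positions to -10000.0, so they effectively vanish after the softmax. A minimal sketch of the arithmetic, with illustrative values:

```python
import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 0.0, 0.0]])  # illustrative padding mask
extended = attention_mask[:, None, None, :]            # (batch, 1, 1, seq)
extended = (1.0 - extended) * -10000.0                 # [[[[0., 0., -10000., -10000.]]]]
# Adding this to the raw attention scores effectively removes the padded positions.
```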
801
+ def get_head_mask(self, head_mask):
802
+ if head_mask is not None:
803
+ raise NotImplementedError
804
+ else:
805
+ head_mask = [None] * self.config.num_hidden_layers
806
+
807
+ return head_mask
808
+
809
+ @unpack_inputs
810
+ def call(
811
+ self,
812
+ input_ids: TFModelInputType | None = None,
813
+ attention_mask: np.ndarray | tf.Tensor | None = None,
814
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
815
+ position_ids: np.ndarray | tf.Tensor | None = None,
816
+ head_mask: np.ndarray | tf.Tensor | None = None,
817
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
818
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
819
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
820
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
821
+ use_cache: Optional[bool] = None,
822
+ output_attentions: Optional[bool] = None,
823
+ output_hidden_states: Optional[bool] = None,
824
+ return_dict: Optional[bool] = None,
825
+ training: Optional[bool] = False,
826
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
827
+ if not self.config.is_decoder:
828
+ use_cache = False
829
+
830
+ if input_ids is not None and inputs_embeds is not None:
831
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
832
+ elif input_ids is not None:
833
+ input_shape = shape_list(input_ids)
834
+ elif inputs_embeds is not None:
835
+ input_shape = shape_list(inputs_embeds)[:-1]
836
+ else:
837
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
838
+
839
+ batch_size, seq_length = input_shape
840
+
841
+ if past_key_values is None:
842
+ past_key_values_length = 0
843
+ past_key_values = [None] * len(self.encoder.layer)
844
+ else:
845
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
846
+
847
+ if attention_mask is None:
848
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
849
+
850
+ if token_type_ids is None:
851
+ token_type_ids = tf.fill(dims=input_shape, value=0)
852
+
853
+ hidden_states = self.embeddings(
854
+ input_ids=input_ids,
855
+ position_ids=position_ids,
856
+ token_type_ids=token_type_ids,
857
+ inputs_embeds=inputs_embeds,
858
+ past_key_values_length=past_key_values_length,
859
+ training=training,
860
+ )
861
+ extended_attention_mask = self.get_extended_attention_mask(
862
+ attention_mask, input_shape, hidden_states.dtype, past_key_values_length
863
+ )
864
+
865
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
866
+ if self.is_decoder and encoder_attention_mask is not None:
867
+ # If a 2D or 3D attention mask is provided for the cross-attention
868
+ # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
869
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
870
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
871
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
872
+ if num_dims_encoder_attention_mask == 3:
873
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
874
+ if num_dims_encoder_attention_mask == 2:
875
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
876
+
877
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
878
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
879
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
880
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
881
+
882
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
883
+ else:
884
+ encoder_extended_attention_mask = None
885
+
886
+ head_mask = self.get_head_mask(head_mask)
887
+
888
+ if hasattr(self, "embeddings_project"):
889
+ hidden_states = self.embeddings_project(hidden_states, training=training)
890
+
891
+ hidden_states = self.encoder(
892
+ hidden_states=hidden_states,
893
+ attention_mask=extended_attention_mask,
894
+ head_mask=head_mask,
895
+ encoder_hidden_states=encoder_hidden_states,
896
+ encoder_attention_mask=encoder_extended_attention_mask,
897
+ past_key_values=past_key_values,
898
+ use_cache=use_cache,
899
+ output_attentions=output_attentions,
900
+ output_hidden_states=output_hidden_states,
901
+ return_dict=return_dict,
902
+ training=training,
903
+ )
904
+
905
+ return hidden_states
906
+
907
+ def build(self, input_shape=None):
908
+ if self.built:
909
+ return
910
+ self.built = True
911
+ if getattr(self, "embeddings", None) is not None:
912
+ with tf.name_scope(self.embeddings.name):
913
+ self.embeddings.build(None)
914
+ if getattr(self, "encoder", None) is not None:
915
+ with tf.name_scope(self.encoder.name):
916
+ self.encoder.build(None)
917
+ if getattr(self, "embeddings_project", None) is not None:
918
+ with tf.name_scope(self.embeddings_project.name):
919
+ self.embeddings_project.build([None, None, self.config.embedding_size])
920
+
921
+
922
+ @dataclass
923
+ class TFElectraForPreTrainingOutput(ModelOutput):
924
+ """
925
+ Output type of [`TFElectraForPreTraining`].
926
+
927
+ Args:
928
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
929
+ Total loss of the ELECTRA objective.
930
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
931
+ Prediction scores of the head (scores for each token before SoftMax).
932
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
933
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
934
+ `(batch_size, sequence_length, hidden_size)`.
935
+
936
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
937
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
938
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
939
+ sequence_length)`.
940
+
941
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
942
+ heads.
943
+ """
944
+
945
+ logits: tf.Tensor = None
946
+ hidden_states: Tuple[tf.Tensor] | None = None
947
+ attentions: Tuple[tf.Tensor] | None = None
948
+
949
+
950
+ ELECTRA_START_DOCSTRING = r"""
951
+
952
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
953
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
954
+ etc.)
955
+
956
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
957
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
958
+ behavior.
959
+
960
+ <Tip>
961
+
962
+ TensorFlow models and layers in `transformers` accept two formats as input:
963
+
964
+ - having all inputs as keyword arguments (like PyTorch models), or
965
+ - having all inputs as a list, tuple or dict in the first positional argument.
966
+
967
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
968
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
969
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
970
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
971
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
972
+ positional argument:
973
+
974
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
975
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
976
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
977
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
978
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
979
+
980
+ Note that when creating models and layers with
981
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
982
+ about any of this, as you can just pass inputs like you would to any other Python function!
983
+
984
+ </Tip>
985
+
986
+ Parameters:
987
+ config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
988
+ Initializing with a config file does not load the weights associated with the model, only the
989
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
990
+ """
991
+
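The Tip above lists three equivalent ways of passing inputs to the TF models. A short, hedged usage sketch, assuming the `google/electra-small-discriminator` checkpoint already used in this file's examples:

```python
from transformers import AutoTokenizer, TFElectraModel

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = TFElectraModel.from_pretrained("google/electra-small-discriminator")
enc = tokenizer("ELECTRA detects replaced tokens.", return_tensors="tf")

# 1) keyword arguments, 2) a positional list, 3) a positional dict
out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
out_list = model([enc["input_ids"], enc["attention_mask"]])
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
```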
992
+ ELECTRA_INPUTS_DOCSTRING = r"""
993
+ Args:
994
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
995
+ Indices of input sequence tokens in the vocabulary.
996
+
997
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
998
+ [`PreTrainedTokenizer.encode`] for details.
999
+
1000
+ [What are input IDs?](../glossary#input-ids)
1001
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1002
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1003
+
1004
+ - 1 for tokens that are **not masked**,
1005
+ - 0 for tokens that are **masked**.
1006
+
1007
+ [What are attention masks?](../glossary#attention-mask)
1008
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
1009
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1010
+ config.max_position_embeddings - 1]`.
1011
+
1012
+ [What are position IDs?](../glossary#position-ids)
1013
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
1014
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
1015
+
1016
+ - 1 indicates the head is **not masked**,
1017
+ - 0 indicates the head is **masked**.
1018
+
1019
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
1020
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1021
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1022
+ model's internal embedding lookup matrix.
1023
+ output_attentions (`bool`, *optional*):
1024
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1025
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1026
+ config will be used instead.
1027
+ output_hidden_states (`bool`, *optional*):
1028
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1029
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1030
+ used instead.
1031
+ return_dict (`bool`, *optional*):
1032
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1033
+ eager mode, in graph mode the value will always be set to True.
1034
+ training (`bool`, *optional*, defaults to `False`):
1035
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1036
+ behaviors between training and evaluation).
1037
+ """
1038
+
1039
+
1040
+ @add_start_docstrings(
1041
+ "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
1042
+ "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
1043
+ "hidden size and embedding size are different. "
1044
+ ""
1045
+ "Both the generator and discriminator checkpoints may be loaded into this model.",
1046
+ ELECTRA_START_DOCSTRING,
1047
+ )
1048
+ class TFElectraModel(TFElectraPreTrainedModel):
1049
+ def __init__(self, config, *inputs, **kwargs):
1050
+ super().__init__(config, *inputs, **kwargs)
1051
+
1052
+ self.electra = TFElectraMainLayer(config, name="electra")
1053
+
1054
+ @unpack_inputs
1055
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1056
+ @add_code_sample_docstrings(
1057
+ checkpoint=_CHECKPOINT_FOR_DOC,
1058
+ output_type=TFBaseModelOutputWithPastAndCrossAttentions,
1059
+ config_class=_CONFIG_FOR_DOC,
1060
+ )
1061
+ def call(
1062
+ self,
1063
+ input_ids: TFModelInputType | None = None,
1064
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1065
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1066
+ position_ids: np.ndarray | tf.Tensor | None = None,
1067
+ head_mask: np.ndarray | tf.Tensor | None = None,
1068
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1069
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1070
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1071
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1072
+ use_cache: Optional[bool] = None,
1073
+ output_attentions: Optional[bool] = None,
1074
+ output_hidden_states: Optional[bool] = None,
1075
+ return_dict: Optional[bool] = None,
1076
+ training: Optional[bool] = False,
1077
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
1078
+ r"""
1079
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1080
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1081
+ the model is configured as a decoder.
1082
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1083
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1084
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1085
+
1086
+ - 1 for tokens that are **not masked**,
1087
+ - 0 for tokens that are **masked**.
1088
+
1089
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1090
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1091
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1092
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1093
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1094
+ use_cache (`bool`, *optional*, defaults to `True`):
1095
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1096
+ `past_key_values`). Set to `False` during training, `True` during generation.
1097
+ """
1098
+ outputs = self.electra(
1099
+ input_ids=input_ids,
1100
+ attention_mask=attention_mask,
1101
+ token_type_ids=token_type_ids,
1102
+ position_ids=position_ids,
1103
+ head_mask=head_mask,
1104
+ encoder_hidden_states=encoder_hidden_states,
1105
+ encoder_attention_mask=encoder_attention_mask,
1106
+ past_key_values=past_key_values,
1107
+ use_cache=use_cache,
1108
+ inputs_embeds=inputs_embeds,
1109
+ output_attentions=output_attentions,
1110
+ output_hidden_states=output_hidden_states,
1111
+ return_dict=return_dict,
1112
+ training=training,
1113
+ )
1114
+
1115
+ return outputs
1116
+
1117
+ def build(self, input_shape=None):
1118
+ if self.built:
1119
+ return
1120
+ self.built = True
1121
+ if getattr(self, "electra", None) is not None:
1122
+ with tf.name_scope(self.electra.name):
1123
+ self.electra.build(None)
1124
+
1125
+
1126
+ @add_start_docstrings(
1127
+ """
1128
+ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
1129
+
1130
+ Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model
1131
+ of the two to have the correct classification head to be used for this model.
1132
+ """,
1133
+ ELECTRA_START_DOCSTRING,
1134
+ )
1135
+ class TFElectraForPreTraining(TFElectraPreTrainedModel):
1136
+ def __init__(self, config, **kwargs):
1137
+ super().__init__(config, **kwargs)
1138
+
1139
+ self.electra = TFElectraMainLayer(config, name="electra")
1140
+ self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name="discriminator_predictions")
1141
+
1142
+ @unpack_inputs
1143
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1144
+ @replace_return_docstrings(output_type=TFElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1145
+ def call(
1146
+ self,
1147
+ input_ids: TFModelInputType | None = None,
1148
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1149
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1150
+ position_ids: np.ndarray | tf.Tensor | None = None,
1151
+ head_mask: np.ndarray | tf.Tensor | None = None,
1152
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1153
+ output_attentions: Optional[bool] = None,
1154
+ output_hidden_states: Optional[bool] = None,
1155
+ return_dict: Optional[bool] = None,
1156
+ training: Optional[bool] = False,
1157
+ ) -> Union[TFElectraForPreTrainingOutput, Tuple[tf.Tensor]]:
1158
+ r"""
1159
+ Returns:
1160
+
1161
+ Examples:
1162
+
1163
+ ```python
1164
+ >>> import tensorflow as tf
1165
+ >>> from transformers import AutoTokenizer, TFElectraForPreTraining
1166
+
1167
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
1168
+ >>> model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
1169
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
1170
+ >>> outputs = model(input_ids)
1171
+ >>> scores = outputs[0]
1172
+ ```"""
1173
+ discriminator_hidden_states = self.electra(
1174
+ input_ids=input_ids,
1175
+ attention_mask=attention_mask,
1176
+ token_type_ids=token_type_ids,
1177
+ position_ids=position_ids,
1178
+ head_mask=head_mask,
1179
+ inputs_embeds=inputs_embeds,
1180
+ output_attentions=output_attentions,
1181
+ output_hidden_states=output_hidden_states,
1182
+ return_dict=return_dict,
1183
+ training=training,
1184
+ )
1185
+ discriminator_sequence_output = discriminator_hidden_states[0]
1186
+ logits = self.discriminator_predictions(discriminator_sequence_output)
1187
+
1188
+ if not return_dict:
1189
+ return (logits,) + discriminator_hidden_states[1:]
1190
+
1191
+ return TFElectraForPreTrainingOutput(
1192
+ logits=logits,
1193
+ hidden_states=discriminator_hidden_states.hidden_states,
1194
+ attentions=discriminator_hidden_states.attentions,
1195
+ )
1196
+
1197
+ def build(self, input_shape=None):
1198
+ if self.built:
1199
+ return
1200
+ self.built = True
1201
+ if getattr(self, "electra", None) is not None:
1202
+ with tf.name_scope(self.electra.name):
1203
+ self.electra.build(None)
1204
+ if getattr(self, "discriminator_predictions", None) is not None:
1205
+ with tf.name_scope(self.discriminator_predictions.name):
1206
+ self.discriminator_predictions.build(None)
1207
+
1208
+
1209
+ class TFElectraMaskedLMHead(keras.layers.Layer):
1210
+ def __init__(self, config, input_embeddings, **kwargs):
1211
+ super().__init__(**kwargs)
1212
+
1213
+ self.config = config
1214
+ self.embedding_size = config.embedding_size
1215
+ self.input_embeddings = input_embeddings
1216
+
1217
+ def build(self, input_shape):
1218
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1219
+
1220
+ super().build(input_shape)
1221
+
1222
+ def get_output_embeddings(self):
1223
+ return self.input_embeddings
1224
+
1225
+ def set_output_embeddings(self, value):
1226
+ self.input_embeddings.weight = value
1227
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1228
+
1229
+ def get_bias(self):
1230
+ return {"bias": self.bias}
1231
+
1232
+ def set_bias(self, value):
1233
+ self.bias = value["bias"]
1234
+ self.config.vocab_size = shape_list(value["bias"])[0]
1235
+
1236
+ def call(self, hidden_states):
1237
+ seq_length = shape_list(tensor=hidden_states)[1]
1238
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
1239
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1240
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1241
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1242
+
1243
+ return hidden_states
1244
+
1245
+
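`TFElectraMaskedLMHead.call` above projects hidden states back onto the vocabulary by re-using the input embedding matrix (weight tying) and adding a learned bias. A shape-only sketch with made-up dimensions, not the library's actual code path:

```python
import tensorflow as tf

embedding_size, vocab_size, seq_length, batch_size = 8, 128, 5, 2
embedding_weight = tf.random.normal((vocab_size, embedding_size))  # shared with the input embeddings
bias = tf.zeros((vocab_size,))

hidden_states = tf.random.normal((batch_size, seq_length, embedding_size))
flat = tf.reshape(hidden_states, [-1, embedding_size])
logits = tf.matmul(flat, embedding_weight, transpose_b=True)       # (batch*seq, vocab_size)
logits = tf.nn.bias_add(tf.reshape(logits, [-1, seq_length, vocab_size]), bias)
print(logits.shape)  # (2, 5, 128)
```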
1246
+ @add_start_docstrings(
1247
+ """
1248
+ Electra model with a language modeling head on top.
1249
+
1250
+ Even though both the discriminator and generator may be loaded into this model, the generator is the only model of
1251
+ the two to have been trained for the masked language modeling task.
1252
+ """,
1253
+ ELECTRA_START_DOCSTRING,
1254
+ )
1255
+ class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLoss):
1256
+ def __init__(self, config, **kwargs):
1257
+ super().__init__(config, **kwargs)
1258
+
1259
+ self.config = config
1260
+ self.electra = TFElectraMainLayer(config, name="electra")
1261
+ self.generator_predictions = TFElectraGeneratorPredictions(config, name="generator_predictions")
1262
+
1263
+ if isinstance(config.hidden_act, str):
1264
+ self.activation = get_tf_activation(config.hidden_act)
1265
+ else:
1266
+ self.activation = config.hidden_act
1267
+
1268
+ self.generator_lm_head = TFElectraMaskedLMHead(config, self.electra.embeddings, name="generator_lm_head")
1269
+
1270
+ def get_lm_head(self):
1271
+ return self.generator_lm_head
1272
+
1273
+ def get_prefix_bias_name(self):
1274
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1275
+ return self.name + "/" + self.generator_lm_head.name
1276
+
1277
+ @unpack_inputs
1278
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1279
+ @add_code_sample_docstrings(
1280
+ checkpoint="google/electra-small-generator",
1281
+ output_type=TFMaskedLMOutput,
1282
+ config_class=_CONFIG_FOR_DOC,
1283
+ mask="[MASK]",
1284
+ expected_output="'paris'",
1285
+ expected_loss=1.22,
1286
+ )
1287
+ def call(
1288
+ self,
1289
+ input_ids: TFModelInputType | None = None,
1290
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1291
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1292
+ position_ids: np.ndarray | tf.Tensor | None = None,
1293
+ head_mask: np.ndarray | tf.Tensor | None = None,
1294
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1295
+ output_attentions: Optional[bool] = None,
1296
+ output_hidden_states: Optional[bool] = None,
1297
+ return_dict: Optional[bool] = None,
1298
+ labels: np.ndarray | tf.Tensor | None = None,
1299
+ training: Optional[bool] = False,
1300
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1301
+ r"""
1302
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1303
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1304
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the
1305
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1306
+ """
1307
+ generator_hidden_states = self.electra(
1308
+ input_ids=input_ids,
1309
+ attention_mask=attention_mask,
1310
+ token_type_ids=token_type_ids,
1311
+ position_ids=position_ids,
1312
+ head_mask=head_mask,
1313
+ inputs_embeds=inputs_embeds,
1314
+ output_attentions=output_attentions,
1315
+ output_hidden_states=output_hidden_states,
1316
+ return_dict=return_dict,
1317
+ training=training,
1318
+ )
1319
+ generator_sequence_output = generator_hidden_states[0]
1320
+ prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
1321
+ prediction_scores = self.generator_lm_head(prediction_scores, training=training)
1322
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1323
+
1324
+ if not return_dict:
1325
+ output = (prediction_scores,) + generator_hidden_states[1:]
1326
+
1327
+ return ((loss,) + output) if loss is not None else output
1328
+
1329
+ return TFMaskedLMOutput(
1330
+ loss=loss,
1331
+ logits=prediction_scores,
1332
+ hidden_states=generator_hidden_states.hidden_states,
1333
+ attentions=generator_hidden_states.attentions,
1334
+ )
1335
+
1336
+ def build(self, input_shape=None):
1337
+ if self.built:
1338
+ return
1339
+ self.built = True
1340
+ if getattr(self, "electra", None) is not None:
1341
+ with tf.name_scope(self.electra.name):
1342
+ self.electra.build(None)
1343
+ if getattr(self, "generator_predictions", None) is not None:
1344
+ with tf.name_scope(self.generator_predictions.name):
1345
+ self.generator_predictions.build(None)
1346
+ if getattr(self, "generator_lm_head", None) is not None:
1347
+ with tf.name_scope(self.generator_lm_head.name):
1348
+ self.generator_lm_head.build(None)
1349
+
1350
+
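A hedged usage sketch for the masked-LM head defined above, following the `google/electra-small-generator` checkpoint named in the `add_code_sample_docstrings` call; the sentence and the decoding steps are illustrative only:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = TFElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
logits = model(**inputs).logits                                   # (1, seq_length, vocab_size)

mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))                           # expected to resemble "paris"
```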
1351
+ class TFElectraClassificationHead(keras.layers.Layer):
1352
+ """Head for sentence-level classification tasks."""
1353
+
1354
+ def __init__(self, config, **kwargs):
1355
+ super().__init__(**kwargs)
1356
+
1357
+ self.dense = keras.layers.Dense(
1358
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1359
+ )
1360
+ classifier_dropout = (
1361
+ config.classifier_dropout
1362
+ if config.classifier_dropout is not None
1363
+ else config.hidden_dropout_prob
1364
+ )
1365
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1366
+ self.out_proj = keras.layers.Dense(
1367
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1368
+ )
1369
+ self.config = config
1370
+
1371
+ def call(self, inputs, **kwargs):
1372
+ x = inputs[:, 0, :] # take <s> token (equiv. to [CLS])
1373
+ x = self.dropout(x)
1374
+ x = self.dense(x)
1375
+ x = get_tf_activation("gelu")(x) # although BERT uses tanh here, it seems Electra authors used gelu here
1376
+ x = self.dropout(x)
1377
+ x = self.out_proj(x)
1378
+
1379
+ return x
1380
+
1381
+ def build(self, input_shape=None):
1382
+ if self.built:
1383
+ return
1384
+ self.built = True
1385
+ if getattr(self, "dense", None) is not None:
1386
+ with tf.name_scope(self.dense.name):
1387
+ self.dense.build([None, None, self.config.hidden_size])
1388
+ if getattr(self, "out_proj", None) is not None:
1389
+ with tf.name_scope(self.out_proj.name):
1390
+ self.out_proj.build([None, None, self.config.hidden_size])
1391
+
1392
+
1393
+ @add_start_docstrings(
1394
+ """
1395
+ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
1396
+ pooled output) e.g. for GLUE tasks.
1397
+ """,
1398
+ ELECTRA_START_DOCSTRING,
1399
+ )
1400
+ class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceClassificationLoss):
1401
+ def __init__(self, config, *inputs, **kwargs):
1402
+ super().__init__(config, *inputs, **kwargs)
1403
+ self.num_labels = config.num_labels
1404
+ self.electra = TFElectraMainLayer(config, name="electra")
1405
+ self.classifier = TFElectraClassificationHead(config, name="classifier")
1406
+
1407
+ @unpack_inputs
1408
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1409
+ @add_code_sample_docstrings(
1410
+ checkpoint="bhadresh-savani/electra-base-emotion",
1411
+ output_type=TFSequenceClassifierOutput,
1412
+ config_class=_CONFIG_FOR_DOC,
1413
+ expected_output="'joy'",
1414
+ expected_loss=0.06,
1415
+ )
1416
+ def call(
1417
+ self,
1418
+ input_ids: TFModelInputType | None = None,
1419
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1420
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1421
+ position_ids: np.ndarray | tf.Tensor | None = None,
1422
+ head_mask: np.ndarray | tf.Tensor | None = None,
1423
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1424
+ output_attentions: Optional[bool] = None,
1425
+ output_hidden_states: Optional[bool] = None,
1426
+ return_dict: Optional[bool] = None,
1427
+ labels: np.ndarray | tf.Tensor | None = None,
1428
+ training: Optional[bool] = False,
1429
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1430
+ r"""
1431
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1432
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1433
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1434
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1435
+ """
1436
+ outputs = self.electra(
1437
+ input_ids=input_ids,
1438
+ attention_mask=attention_mask,
1439
+ token_type_ids=token_type_ids,
1440
+ position_ids=position_ids,
1441
+ head_mask=head_mask,
1442
+ inputs_embeds=inputs_embeds,
1443
+ output_attentions=output_attentions,
1444
+ output_hidden_states=output_hidden_states,
1445
+ return_dict=return_dict,
1446
+ training=training,
1447
+ )
1448
+ logits = self.classifier(outputs[0])
1449
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1450
+
1451
+ if not return_dict:
1452
+ output = (logits,) + outputs[1:]
1453
+
1454
+ return ((loss,) + output) if loss is not None else output
1455
+
1456
+ return TFSequenceClassifierOutput(
1457
+ loss=loss,
1458
+ logits=logits,
1459
+ hidden_states=outputs.hidden_states,
1460
+ attentions=outputs.attentions,
1461
+ )
1462
+
1463
+ def build(self, input_shape=None):
1464
+ if self.built:
1465
+ return
1466
+ self.built = True
1467
+ if getattr(self, "electra", None) is not None:
1468
+ with tf.name_scope(self.electra.name):
1469
+ self.electra.build(None)
1470
+ if getattr(self, "classifier", None) is not None:
1471
+ with tf.name_scope(self.classifier.name):
1472
+ self.classifier.build(None)
1473
+
1474
+
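A hedged usage sketch for the sequence-classification model above, assuming the `bhadresh-savani/electra-base-emotion` checkpoint referenced in its docstring sample; the input sentence and predicted label are illustrative:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForSequenceClassification

checkpoint = "bhadresh-savani/electra-base-emotion"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("I am thrilled with these results!", return_tensors="tf")
logits = model(**inputs).logits                      # (1, num_labels)
predicted_class = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])        # e.g. "joy"
```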
1475
+ @add_start_docstrings(
1476
+ """
1477
+ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1478
+ softmax) e.g. for RocStories/SWAG tasks.
1479
+ """,
1480
+ ELECTRA_START_DOCSTRING,
1481
+ )
1482
+ class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss):
1483
+ def __init__(self, config, *inputs, **kwargs):
1484
+ super().__init__(config, *inputs, **kwargs)
1485
+
1486
+ self.electra = TFElectraMainLayer(config, name="electra")
1487
+ self.sequence_summary = TFSequenceSummary(
1488
+ config, initializer_range=config.initializer_range, name="sequence_summary"
1489
+ )
1490
+ self.classifier = keras.layers.Dense(
1491
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1492
+ )
1493
+ self.config = config
1494
+
1495
+ @unpack_inputs
1496
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1497
+ @add_code_sample_docstrings(
1498
+ checkpoint=_CHECKPOINT_FOR_DOC,
1499
+ output_type=TFMultipleChoiceModelOutput,
1500
+ config_class=_CONFIG_FOR_DOC,
1501
+ )
1502
+ def call(
1503
+ self,
1504
+ input_ids: TFModelInputType | None = None,
1505
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1506
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1507
+ position_ids: np.ndarray | tf.Tensor | None = None,
1508
+ head_mask: np.ndarray | tf.Tensor | None = None,
1509
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1510
+ output_attentions: Optional[bool] = None,
1511
+ output_hidden_states: Optional[bool] = None,
1512
+ return_dict: Optional[bool] = None,
1513
+ labels: np.ndarray | tf.Tensor | None = None,
1514
+ training: Optional[bool] = False,
1515
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1516
+ r"""
1517
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1518
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1519
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1520
+ """
1521
+
1522
+ if input_ids is not None:
1523
+ num_choices = shape_list(input_ids)[1]
1524
+ seq_length = shape_list(input_ids)[2]
1525
+ else:
1526
+ num_choices = shape_list(inputs_embeds)[1]
1527
+ seq_length = shape_list(inputs_embeds)[2]
1528
+
1529
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1530
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1531
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1532
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1533
+ flat_inputs_embeds = (
1534
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1535
+ if inputs_embeds is not None
1536
+ else None
1537
+ )
1538
+ outputs = self.electra(
1539
+ input_ids=flat_input_ids,
1540
+ attention_mask=flat_attention_mask,
1541
+ token_type_ids=flat_token_type_ids,
1542
+ position_ids=flat_position_ids,
1543
+ head_mask=head_mask,
1544
+ inputs_embeds=flat_inputs_embeds,
1545
+ output_attentions=output_attentions,
1546
+ output_hidden_states=output_hidden_states,
1547
+ return_dict=return_dict,
1548
+ training=training,
1549
+ )
1550
+ logits = self.sequence_summary(outputs[0])
1551
+ logits = self.classifier(logits)
1552
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1553
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1554
+
1555
+ if not return_dict:
1556
+ output = (reshaped_logits,) + outputs[1:]
1557
+
1558
+ return ((loss,) + output) if loss is not None else output
1559
+
1560
+ return TFMultipleChoiceModelOutput(
1561
+ loss=loss,
1562
+ logits=reshaped_logits,
1563
+ hidden_states=outputs.hidden_states,
1564
+ attentions=outputs.attentions,
1565
+ )
1566
+
1567
+ def build(self, input_shape=None):
1568
+ if self.built:
1569
+ return
1570
+ self.built = True
1571
+ if getattr(self, "electra", None) is not None:
1572
+ with tf.name_scope(self.electra.name):
1573
+ self.electra.build(None)
1574
+ if getattr(self, "sequence_summary", None) is not None:
1575
+ with tf.name_scope(self.sequence_summary.name):
1576
+ self.sequence_summary.build(None)
1577
+ if getattr(self, "classifier", None) is not None:
1578
+ with tf.name_scope(self.classifier.name):
1579
+ self.classifier.build([None, None, self.config.hidden_size])
1580
+
1581
+
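The multiple-choice model above flattens `(batch_size, num_choices, seq_length)` inputs to `(batch_size * num_choices, seq_length)` before the encoder and folds the per-choice classifier outputs back to `(batch_size, num_choices)`. A shape-only sketch with dummy tensors (no pretrained weights implied):

```python
import tensorflow as tf

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = tf.zeros((batch_size, num_choices, seq_length), dtype=tf.int32)

flat_input_ids = tf.reshape(input_ids, (-1, seq_length))      # (8, 7), what the encoder sees
per_choice_logits = tf.zeros((batch_size * num_choices, 1))   # stand-in for the classifier output
reshaped_logits = tf.reshape(per_choice_logits, (-1, num_choices))
print(flat_input_ids.shape, reshaped_logits.shape)            # (8, 7) (2, 4)
```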
1582
+ @add_start_docstrings(
1583
+ """
1584
+ Electra model with a token classification head on top.
1585
+
1586
+ Both the discriminator and generator may be loaded into this model.
1587
+ """,
1588
+ ELECTRA_START_DOCSTRING,
1589
+ )
1590
+ class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassificationLoss):
1591
+ def __init__(self, config, **kwargs):
1592
+ super().__init__(config, **kwargs)
1593
+
1594
+ self.electra = TFElectraMainLayer(config, name="electra")
1595
+ classifier_dropout = (
1596
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1597
+ )
1598
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1599
+ self.classifier = keras.layers.Dense(
1600
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1601
+ )
1602
+ self.config = config
1603
+
1604
+ @unpack_inputs
1605
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1606
+ @add_code_sample_docstrings(
1607
+ checkpoint="bhadresh-savani/electra-base-discriminator-finetuned-conll03-english",
1608
+ output_type=TFTokenClassifierOutput,
1609
+ config_class=_CONFIG_FOR_DOC,
1610
+ expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']",
1611
+ expected_loss=0.11,
1612
+ )
1613
+ def call(
1614
+ self,
1615
+ input_ids: TFModelInputType | None = None,
1616
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1617
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1618
+ position_ids: np.ndarray | tf.Tensor | None = None,
1619
+ head_mask: np.ndarray | tf.Tensor | None = None,
1620
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1621
+ output_attentions: Optional[bool] = None,
1622
+ output_hidden_states: Optional[bool] = None,
1623
+ return_dict: Optional[bool] = None,
1624
+ labels: np.ndarray | tf.Tensor | None = None,
1625
+ training: Optional[bool] = False,
1626
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1627
+ r"""
1628
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1629
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1630
+ """
1631
+ discriminator_hidden_states = self.electra(
1632
+ input_ids=input_ids,
1633
+ attention_mask=attention_mask,
1634
+ token_type_ids=token_type_ids,
1635
+ position_ids=position_ids,
1636
+ head_mask=head_mask,
1637
+ inputs_embeds=inputs_embeds,
1638
+ output_attentions=output_attentions,
1639
+ output_hidden_states=output_hidden_states,
1640
+ return_dict=return_dict,
1641
+ training=training,
1642
+ )
1643
+ discriminator_sequence_output = discriminator_hidden_states[0]
1644
+ discriminator_sequence_output = self.dropout(discriminator_sequence_output)
1645
+ logits = self.classifier(discriminator_sequence_output)
1646
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1647
+
1648
+ if not return_dict:
1649
+ output = (logits,) + discriminator_hidden_states[1:]
1650
+
1651
+ return ((loss,) + output) if loss is not None else output
1652
+
1653
+ return TFTokenClassifierOutput(
1654
+ loss=loss,
1655
+ logits=logits,
1656
+ hidden_states=discriminator_hidden_states.hidden_states,
1657
+ attentions=discriminator_hidden_states.attentions,
1658
+ )
1659
+
1660
+ def build(self, input_shape=None):
1661
+ if self.built:
1662
+ return
1663
+ self.built = True
1664
+ if getattr(self, "electra", None) is not None:
1665
+ with tf.name_scope(self.electra.name):
1666
+ self.electra.build(None)
1667
+ if getattr(self, "classifier", None) is not None:
1668
+ with tf.name_scope(self.classifier.name):
1669
+ self.classifier.build([None, None, self.config.hidden_size])
1670
+
1671
+
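A hedged usage sketch for the token-classification model above, assuming the CoNLL-03 checkpoint referenced in its docstring sample; the sentence is illustrative and each sub-word token receives its own tag:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForTokenClassification

checkpoint = "bhadresh-savani/electra-base-discriminator-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForTokenClassification.from_pretrained(checkpoint)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
logits = model(**inputs).logits                                # (1, seq_length, num_labels)
predictions = tf.argmax(logits, axis=-1)[0]
print([model.config.id2label[int(i)] for i in predictions])    # per-token B-/I-/O tags
```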
1672
+ @add_start_docstrings(
1673
+ """
1674
+ Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1675
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1676
+ """,
1677
+ ELECTRA_START_DOCSTRING,
1678
+ )
1679
+ class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnsweringLoss):
1680
+ def __init__(self, config, *inputs, **kwargs):
1681
+ super().__init__(config, *inputs, **kwargs)
1682
+
1683
+ self.num_labels = config.num_labels
1684
+ self.electra = TFElectraMainLayer(config, name="electra")
1685
+ self.qa_outputs = keras.layers.Dense(
1686
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1687
+ )
1688
+ self.config = config
1689
+
1690
+ @unpack_inputs
1691
+ @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1692
+ @add_code_sample_docstrings(
1693
+ checkpoint="bhadresh-savani/electra-base-squad2",
1694
+ output_type=TFQuestionAnsweringModelOutput,
1695
+ config_class=_CONFIG_FOR_DOC,
1696
+ qa_target_start_index=11,
1697
+ qa_target_end_index=12,
1698
+ expected_output="'a nice puppet'",
1699
+ expected_loss=2.64,
1700
+ )
1701
+ def call(
1702
+ self,
1703
+ input_ids: TFModelInputType | None = None,
1704
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1705
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1706
+ position_ids: np.ndarray | tf.Tensor | None = None,
1707
+ head_mask: np.ndarray | tf.Tensor | None = None,
1708
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1709
+ output_attentions: Optional[bool] = None,
1710
+ output_hidden_states: Optional[bool] = None,
1711
+ return_dict: Optional[bool] = None,
1712
+ start_positions: np.ndarray | tf.Tensor | None = None,
1713
+ end_positions: np.ndarray | tf.Tensor | None = None,
1714
+ training: Optional[bool] = False,
1715
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1716
+ r"""
1717
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1718
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1719
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1720
+ are not taken into account for computing the loss.
1721
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1722
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1723
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1724
+ are not taken into account for computing the loss.
1725
+ """
1726
+ discriminator_hidden_states = self.electra(
1727
+ input_ids=input_ids,
1728
+ attention_mask=attention_mask,
1729
+ token_type_ids=token_type_ids,
1730
+ position_ids=position_ids,
1731
+ head_mask=head_mask,
1732
+ inputs_embeds=inputs_embeds,
1733
+ output_attentions=output_attentions,
1734
+ output_hidden_states=output_hidden_states,
1735
+ return_dict=return_dict,
1736
+ training=training,
1737
+ )
1738
+ discriminator_sequence_output = discriminator_hidden_states[0]
1739
+ logits = self.qa_outputs(discriminator_sequence_output)
1740
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1741
+ start_logits = tf.squeeze(start_logits, axis=-1)
1742
+ end_logits = tf.squeeze(end_logits, axis=-1)
1743
+ loss = None
1744
+
1745
+ if start_positions is not None and end_positions is not None:
1746
+ labels = {"start_position": start_positions}
1747
+ labels["end_position"] = end_positions
1748
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1749
+
1750
+ if not return_dict:
1751
+ output = (
1752
+ start_logits,
1753
+ end_logits,
1754
+ ) + discriminator_hidden_states[1:]
1755
+
1756
+ return ((loss,) + output) if loss is not None else output
1757
+
1758
+ return TFQuestionAnsweringModelOutput(
1759
+ loss=loss,
1760
+ start_logits=start_logits,
1761
+ end_logits=end_logits,
1762
+ hidden_states=discriminator_hidden_states.hidden_states,
1763
+ attentions=discriminator_hidden_states.attentions,
1764
+ )
1765
+
1766
+ def build(self, input_shape=None):
1767
+ if self.built:
1768
+ return
1769
+ self.built = True
1770
+ if getattr(self, "electra", None) is not None:
1771
+ with tf.name_scope(self.electra.name):
1772
+ self.electra.build(None)
1773
+ if getattr(self, "qa_outputs", None) is not None:
1774
+ with tf.name_scope(self.qa_outputs.name):
1775
+ self.qa_outputs.build([None, None, self.config.hidden_size])
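`TFElectraForQuestionAnswering` above returns independent start and end logits; a common way to recover the answer span is to take the argmax of each and decode the tokens in between. A hedged sketch assuming the `bhadresh-savani/electra-base-squad2` checkpoint referenced in its docstring sample:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFElectraForQuestionAnswering

checkpoint = "bhadresh-savani/electra-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFElectraForQuestionAnswering.from_pretrained(checkpoint)

question = "Who wrote the play?"
context = "The play was written by William Shakespeare."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))  # e.g. "william shakespeare"
```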
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra.py ADDED
@@ -0,0 +1,546 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+ PRETRAINED_VOCAB_FILES_MAP = {
30
+ "vocab_file": {
31
+ "google/electra-small-generator": (
32
+ "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
33
+ ),
34
+ "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
35
+ "google/electra-large-generator": (
36
+ "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
37
+ ),
38
+ "google/electra-small-discriminator": (
39
+ "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
40
+ ),
41
+ "google/electra-base-discriminator": (
42
+ "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
43
+ ),
44
+ "google/electra-large-discriminator": (
45
+ "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
46
+ ),
47
+ }
48
+ }
49
+
50
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
51
+ "google/electra-small-generator": 512,
52
+ "google/electra-base-generator": 512,
53
+ "google/electra-large-generator": 512,
54
+ "google/electra-small-discriminator": 512,
55
+ "google/electra-base-discriminator": 512,
56
+ "google/electra-large-discriminator": 512,
57
+ }
58
+
59
+
60
+ PRETRAINED_INIT_CONFIGURATION = {
61
+ "google/electra-small-generator": {"do_lower_case": True},
62
+ "google/electra-base-generator": {"do_lower_case": True},
63
+ "google/electra-large-generator": {"do_lower_case": True},
64
+ "google/electra-small-discriminator": {"do_lower_case": True},
65
+ "google/electra-base-discriminator": {"do_lower_case": True},
66
+ "google/electra-large-discriminator": {"do_lower_case": True},
67
+ }
68
+
69
+
70
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
71
+ def load_vocab(vocab_file):
72
+ """Loads a vocabulary file into a dictionary."""
73
+ vocab = collections.OrderedDict()
74
+ with open(vocab_file, "r", encoding="utf-8") as reader:
75
+ tokens = reader.readlines()
76
+ for index, token in enumerate(tokens):
77
+ token = token.rstrip("\n")
78
+ vocab[token] = index
79
+ return vocab
80
+
81
+
82
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
83
+ def whitespace_tokenize(text):
84
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
85
+ text = text.strip()
86
+ if not text:
87
+ return []
88
+ tokens = text.split()
89
+ return tokens
90
+
91
+
92
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->Electra,BERT->Electra
93
+ class ElectraTokenizer(PreTrainedTokenizer):
94
+ r"""
95
+ Construct an Electra tokenizer. Based on WordPiece.
96
+
97
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
98
+ this superclass for more information regarding those methods.
99
+
100
+ Args:
101
+ vocab_file (`str`):
102
+ File containing the vocabulary.
103
+ do_lower_case (`bool`, *optional*, defaults to `True`):
104
+ Whether or not to lowercase the input when tokenizing.
105
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
106
+ Whether or not to do basic tokenization before WordPiece.
107
+ never_split (`Iterable`, *optional*):
108
+ Collection of tokens which will never be split during tokenization. Only has an effect when
109
+ `do_basic_tokenize=True`
110
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
111
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
112
+ token instead.
113
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
114
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
115
+ sequence classification or for a text and a question for question answering. It is also used as the last
116
+ token of a sequence built with special tokens.
117
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
118
+ The token used for padding, for example when batching sequences of different lengths.
119
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
120
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
121
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
122
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
123
+ The token used for masking values. This is the token used when training this model with masked language
124
+ modeling. This is the token which the model will try to predict.
125
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
126
+ Whether or not to tokenize Chinese characters.
127
+
128
+ This should likely be deactivated for Japanese (see this
129
+ [issue](https://github.com/huggingface/transformers/issues/328)).
130
+ strip_accents (`bool`, *optional*):
131
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
132
+ value for `lowercase` (as in the original Electra).
133
+ """
134
+
135
+ vocab_files_names = VOCAB_FILES_NAMES
136
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
137
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
138
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
139
+
140
+ def __init__(
141
+ self,
142
+ vocab_file,
143
+ do_lower_case=True,
144
+ do_basic_tokenize=True,
145
+ never_split=None,
146
+ unk_token="[UNK]",
147
+ sep_token="[SEP]",
148
+ pad_token="[PAD]",
149
+ cls_token="[CLS]",
150
+ mask_token="[MASK]",
151
+ tokenize_chinese_chars=True,
152
+ strip_accents=None,
153
+ **kwargs,
154
+ ):
155
+ if not os.path.isfile(vocab_file):
156
+ raise ValueError(
157
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
158
+ " model use `tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
159
+ )
160
+ self.vocab = load_vocab(vocab_file)
161
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
162
+ self.do_basic_tokenize = do_basic_tokenize
163
+ if do_basic_tokenize:
164
+ self.basic_tokenizer = BasicTokenizer(
165
+ do_lower_case=do_lower_case,
166
+ never_split=never_split,
167
+ tokenize_chinese_chars=tokenize_chinese_chars,
168
+ strip_accents=strip_accents,
169
+ )
170
+
171
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
172
+
173
+ super().__init__(
174
+ do_lower_case=do_lower_case,
175
+ do_basic_tokenize=do_basic_tokenize,
176
+ never_split=never_split,
177
+ unk_token=unk_token,
178
+ sep_token=sep_token,
179
+ pad_token=pad_token,
180
+ cls_token=cls_token,
181
+ mask_token=mask_token,
182
+ tokenize_chinese_chars=tokenize_chinese_chars,
183
+ strip_accents=strip_accents,
184
+ **kwargs,
185
+ )
186
+
187
+ @property
188
+ def do_lower_case(self):
189
+ return self.basic_tokenizer.do_lower_case
190
+
191
+ @property
192
+ def vocab_size(self):
193
+ return len(self.vocab)
194
+
195
+ def get_vocab(self):
196
+ return dict(self.vocab, **self.added_tokens_encoder)
197
+
198
+ def _tokenize(self, text, split_special_tokens=False):
199
+ split_tokens = []
200
+ if self.do_basic_tokenize:
201
+ for token in self.basic_tokenizer.tokenize(
202
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
203
+ ):
204
+ # If the token is part of the never_split set
205
+ if token in self.basic_tokenizer.never_split:
206
+ split_tokens.append(token)
207
+ else:
208
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
209
+ else:
210
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
211
+ return split_tokens
212
+
213
+ def _convert_token_to_id(self, token):
214
+ """Converts a token (str) in an id using the vocab."""
215
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
216
+
217
+ def _convert_id_to_token(self, index):
218
+ """Converts an index (integer) in a token (str) using the vocab."""
219
+ return self.ids_to_tokens.get(index, self.unk_token)
220
+
221
+ def convert_tokens_to_string(self, tokens):
222
+ """Converts a sequence of tokens (string) in a single string."""
223
+ out_string = " ".join(tokens).replace(" ##", "").strip()
224
+ return out_string
225
+
226
+ def build_inputs_with_special_tokens(
227
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
228
+ ) -> List[int]:
229
+ """
230
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
231
+ adding special tokens. An Electra sequence has the following format:
232
+
233
+ - single sequence: `[CLS] X [SEP]`
234
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
235
+
236
+ Args:
237
+ token_ids_0 (`List[int]`):
238
+ List of IDs to which the special tokens will be added.
239
+ token_ids_1 (`List[int]`, *optional*):
240
+ Optional second list of IDs for sequence pairs.
241
+
242
+ Returns:
243
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
244
+ """
245
+ if token_ids_1 is None:
246
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
247
+ cls = [self.cls_token_id]
248
+ sep = [self.sep_token_id]
249
+ return cls + token_ids_0 + sep + token_ids_1 + sep
250
+
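A tiny sketch of the `[CLS]`/`[SEP]` layout that `build_inputs_with_special_tokens` produces, using hypothetical token ids rather than a real vocabulary:

```python
# Hypothetical ids: cls=101, sep=102, sequence A = [7, 8], sequence B = [9]
cls_id, sep_id = 101, 102
token_ids_0, token_ids_1 = [7, 8], [9]

single = [cls_id] + token_ids_0 + [sep_id]                          # [101, 7, 8, 102]
pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]   # [101, 7, 8, 102, 9, 102]
print(single, pair)
```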
251
+ def get_special_tokens_mask(
252
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
253
+ ) -> List[int]:
254
+ """
255
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
256
+ special tokens using the tokenizer `prepare_for_model` method.
257
+
258
+ Args:
259
+ token_ids_0 (`List[int]`):
260
+ List of IDs.
261
+ token_ids_1 (`List[int]`, *optional*):
262
+ Optional second list of IDs for sequence pairs.
263
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
264
+ Whether or not the token list is already formatted with special tokens for the model.
265
+
266
+ Returns:
267
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
268
+ """
269
+
270
+ if already_has_special_tokens:
271
+ return super().get_special_tokens_mask(
272
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
273
+ )
274
+
275
+ if token_ids_1 is not None:
276
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
277
+ return [1] + ([0] * len(token_ids_0)) + [1]
278
+
279
+ def create_token_type_ids_from_sequences(
280
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
281
+ ) -> List[int]:
282
+ """
283
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An Electra sequence
284
+ pair mask has the following format:
285
+
286
+ ```
287
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
288
+ | first sequence | second sequence |
289
+ ```
290
+
291
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
292
+
293
+ Args:
294
+ token_ids_0 (`List[int]`):
295
+ List of IDs.
296
+ token_ids_1 (`List[int]`, *optional*):
297
+ Optional second list of IDs for sequence pairs.
298
+
299
+ Returns:
300
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
301
+ """
302
+ sep = [self.sep_token_id]
303
+ cls = [self.cls_token_id]
304
+ if token_ids_1 is None:
305
+ return len(cls + token_ids_0 + sep) * [0]
306
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
307
+
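And, for the same hypothetical pair, the segment mask produced by `create_token_type_ids_from_sequences`: zeros cover `[CLS] A [SEP]`, ones cover `B [SEP]`:

```python
cls_id, sep_id = 101, 102
token_ids_0, token_ids_1 = [7, 8], [9]

token_type_ids = [0] * len([cls_id] + token_ids_0 + [sep_id]) + [1] * len(token_ids_1 + [sep_id])
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]
```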
308
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
309
+ index = 0
310
+ if os.path.isdir(save_directory):
311
+ vocab_file = os.path.join(
312
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
313
+ )
314
+ else:
315
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
316
+ with open(vocab_file, "w", encoding="utf-8") as writer:
317
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
318
+ if index != token_index:
319
+ logger.warning(
320
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
321
+ " Please check that the vocabulary is not corrupted!"
322
+ )
323
+ index = token_index
324
+ writer.write(token + "\n")
325
+ index += 1
326
+ return (vocab_file,)
327
+
328
+
329
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
330
+ class BasicTokenizer(object):
331
+ """
332
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
333
+
334
+ Args:
335
+ do_lower_case (`bool`, *optional*, defaults to `True`):
336
+ Whether or not to lowercase the input when tokenizing.
337
+ never_split (`Iterable`, *optional*):
338
+ Collection of tokens which will never be split during tokenization. Only has an effect when
339
+ `do_basic_tokenize=True`
340
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
341
+ Whether or not to tokenize Chinese characters.
342
+
343
+ This should likely be deactivated for Japanese (see this
344
+ [issue](https://github.com/huggingface/transformers/issues/328)).
345
+ strip_accents (`bool`, *optional*):
346
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
347
+ value for `lowercase` (as in the original BERT).
348
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
349
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
350
+ the full context of the words, such as contractions.
351
+ """
352
+
353
+ def __init__(
354
+ self,
355
+ do_lower_case=True,
356
+ never_split=None,
357
+ tokenize_chinese_chars=True,
358
+ strip_accents=None,
359
+ do_split_on_punc=True,
360
+ ):
361
+ if never_split is None:
362
+ never_split = []
363
+ self.do_lower_case = do_lower_case
364
+ self.never_split = set(never_split)
365
+ self.tokenize_chinese_chars = tokenize_chinese_chars
366
+ self.strip_accents = strip_accents
367
+ self.do_split_on_punc = do_split_on_punc
368
+
369
+ def tokenize(self, text, never_split=None):
370
+ """
371
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
372
+
373
+ Args:
374
+ never_split (`List[str]`, *optional*)
375
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
376
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
377
+ """
378
+ # union() returns a new set by concatenating the two sets.
379
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
380
+ text = self._clean_text(text)
381
+
382
+ # This was added on November 1st, 2018 for the multilingual and Chinese
383
+ # models. This is also applied to the English models now, but it doesn't
384
+ # matter since the English models were not trained on any Chinese data
385
+ # and generally don't have any Chinese data in them (there are Chinese
386
+ # characters in the vocabulary because Wikipedia does have some Chinese
387
+ # words in the English Wikipedia.).
388
+ if self.tokenize_chinese_chars:
389
+ text = self._tokenize_chinese_chars(text)
390
+ # prevents treating the same character with different unicode codepoints as different characters
391
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
392
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
393
+ split_tokens = []
394
+ for token in orig_tokens:
395
+ if token not in never_split:
396
+ if self.do_lower_case:
397
+ token = token.lower()
398
+ if self.strip_accents is not False:
399
+ token = self._run_strip_accents(token)
400
+ elif self.strip_accents:
401
+ token = self._run_strip_accents(token)
402
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
403
+
404
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
405
+ return output_tokens
406
+
407
+ def _run_strip_accents(self, text):
408
+ """Strips accents from a piece of text."""
409
+ text = unicodedata.normalize("NFD", text)
410
+ output = []
411
+ for char in text:
412
+ cat = unicodedata.category(char)
413
+ if cat == "Mn":
414
+ continue
415
+ output.append(char)
416
+ return "".join(output)
417
+
418
+ def _run_split_on_punc(self, text, never_split=None):
419
+ """Splits punctuation on a piece of text."""
420
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
421
+ return [text]
422
+ chars = list(text)
423
+ i = 0
424
+ start_new_word = True
425
+ output = []
426
+ while i < len(chars):
427
+ char = chars[i]
428
+ if _is_punctuation(char):
429
+ output.append([char])
430
+ start_new_word = True
431
+ else:
432
+ if start_new_word:
433
+ output.append([])
434
+ start_new_word = False
435
+ output[-1].append(char)
436
+ i += 1
437
+
438
+ return ["".join(x) for x in output]
439
+
440
+ def _tokenize_chinese_chars(self, text):
441
+ """Adds whitespace around any CJK character."""
442
+ output = []
443
+ for char in text:
444
+ cp = ord(char)
445
+ if self._is_chinese_char(cp):
446
+ output.append(" ")
447
+ output.append(char)
448
+ output.append(" ")
449
+ else:
450
+ output.append(char)
451
+ return "".join(output)
452
+
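A toy check of the whitespace padding described above, restricted to the main CJK Unified Ideographs range (U+4E00..U+9FFF) for brevity:

```python
# Pads spaces around characters in the main CJK block, mirroring the logic above.
def pad_cjk(text):
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:
            out.append(f" {ch} ")
        else:
            out.append(ch)
    return "".join(out)

assert pad_cjk("ab中文cd") == "ab 中  文 cd"
```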
453
+ def _is_chinese_char(self, cp):
454
+ """Checks whether CP is the codepoint of a CJK character."""
455
+ # This defines a "chinese character" as anything in the CJK Unicode block:
456
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
457
+ #
458
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
459
+ # despite its name. The modern Korean Hangul alphabet is a different block,
460
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
461
+ # space-separated words, so they are not treated specially and handled
462
+ # like all of the other languages.
463
+ if (
464
+ (cp >= 0x4E00 and cp <= 0x9FFF)
465
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
466
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
467
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
468
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
469
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
470
+ or (cp >= 0xF900 and cp <= 0xFAFF)
471
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
472
+ ): #
473
+ return True
474
+
475
+ return False
476
+
477
+ def _clean_text(self, text):
478
+ """Performs invalid character removal and whitespace cleanup on text."""
479
+ output = []
480
+ for char in text:
481
+ cp = ord(char)
482
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
483
+ continue
484
+ if _is_whitespace(char):
485
+ output.append(" ")
486
+ else:
487
+ output.append(char)
488
+ return "".join(output)
489
+
490
+
491
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
492
+ class WordpieceTokenizer(object):
493
+ """Runs WordPiece tokenization."""
494
+
495
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
496
+ self.vocab = vocab
497
+ self.unk_token = unk_token
498
+ self.max_input_chars_per_word = max_input_chars_per_word
499
+
500
+ def tokenize(self, text):
501
+ """
502
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
503
+ tokenization using the given vocabulary.
504
+
505
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
506
+
507
+ Args:
508
+ text: A single token or whitespace separated tokens. This should have
509
+ already been passed through *BasicTokenizer*.
510
+
511
+ Returns:
512
+ A list of wordpiece tokens.
513
+ """
514
+
515
+ output_tokens = []
516
+ for token in whitespace_tokenize(text):
517
+ chars = list(token)
518
+ if len(chars) > self.max_input_chars_per_word:
519
+ output_tokens.append(self.unk_token)
520
+ continue
521
+
522
+ is_bad = False
523
+ start = 0
524
+ sub_tokens = []
525
+ while start < len(chars):
526
+ end = len(chars)
527
+ cur_substr = None
528
+ while start < end:
529
+ substr = "".join(chars[start:end])
530
+ if start > 0:
531
+ substr = "##" + substr
532
+ if substr in self.vocab:
533
+ cur_substr = substr
534
+ break
535
+ end -= 1
536
+ if cur_substr is None:
537
+ is_bad = True
538
+ break
539
+ sub_tokens.append(cur_substr)
540
+ start = end
541
+
542
+ if is_bad:
543
+ output_tokens.append(self.unk_token)
544
+ else:
545
+ output_tokens.extend(sub_tokens)
546
+ return output_tokens
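The greedy longest-match-first loop above can be exercised with a toy, hand-picked vocabulary (hypothetical, not the real ELECTRA vocab):

```python
# Reproduces the docstring example "unaffable" -> ["un", "##aff", "##able"].
vocab = {"un", "##aff", "##able"}

def wordpiece(token, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:
            sub = ("##" if start > 0 else "") + token[start:end]
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:          # no piece matched: emit the unknown token
            return [unk_token]
        pieces.append(cur)
        start = end
    return pieces

assert wordpiece("unaffable") == ["un", "##aff", "##able"]
```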
env-llmeval/lib/python3.10/site-packages/transformers/models/electra/tokenization_electra_fast.py ADDED
@@ -0,0 +1,231 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_electra import ElectraTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+ PRETRAINED_VOCAB_FILES_MAP = {
28
+ "vocab_file": {
29
+ "google/electra-small-generator": (
30
+ "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
31
+ ),
32
+ "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
33
+ "google/electra-large-generator": (
34
+ "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
35
+ ),
36
+ "google/electra-small-discriminator": (
37
+ "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
38
+ ),
39
+ "google/electra-base-discriminator": (
40
+ "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
41
+ ),
42
+ "google/electra-large-discriminator": (
43
+ "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
44
+ ),
45
+ },
46
+ "tokenizer_file": {
47
+ "google/electra-small-generator": (
48
+ "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
49
+ ),
50
+ "google/electra-base-generator": (
51
+ "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
52
+ ),
53
+ "google/electra-large-generator": (
54
+ "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
55
+ ),
56
+ "google/electra-small-discriminator": (
57
+ "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
58
+ ),
59
+ "google/electra-base-discriminator": (
60
+ "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
61
+ ),
62
+ "google/electra-large-discriminator": (
63
+ "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
64
+ ),
65
+ },
66
+ }
67
+
68
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
69
+ "google/electra-small-generator": 512,
70
+ "google/electra-base-generator": 512,
71
+ "google/electra-large-generator": 512,
72
+ "google/electra-small-discriminator": 512,
73
+ "google/electra-base-discriminator": 512,
74
+ "google/electra-large-discriminator": 512,
75
+ }
76
+
77
+ PRETRAINED_INIT_CONFIGURATION = {
78
+ "google/electra-small-generator": {"do_lower_case": True},
79
+ "google/electra-base-generator": {"do_lower_case": True},
80
+ "google/electra-large-generator": {"do_lower_case": True},
81
+ "google/electra-small-discriminator": {"do_lower_case": True},
82
+ "google/electra-base-discriminator": {"do_lower_case": True},
83
+ "google/electra-large-discriminator": {"do_lower_case": True},
84
+ }
85
+
86
+
87
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->Electra , BERT->ELECTRA
88
+ class ElectraTokenizerFast(PreTrainedTokenizerFast):
89
+ r"""
90
+ Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
91
+
92
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
93
+ refer to this superclass for more information regarding those methods.
94
+
95
+ Args:
96
+ vocab_file (`str`):
97
+ File containing the vocabulary.
98
+ do_lower_case (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to lowercase the input when tokenizing.
100
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
101
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
102
+ token instead.
103
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
104
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
105
+ sequence classification or for a text and a question for question answering. It is also used as the last
106
+ token of a sequence built with special tokens.
107
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
108
+ The token used for padding, for example when batching sequences of different lengths.
109
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
110
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
111
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
112
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
113
+ The token used for masking values. This is the token used when training this model with masked language
114
+ modeling. This is the token which the model will try to predict.
115
+ clean_text (`bool`, *optional*, defaults to `True`):
116
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
117
+ whitespaces by the classic one.
118
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
119
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
120
+ issue](https://github.com/huggingface/transformers/issues/328)).
121
+ strip_accents (`bool`, *optional*):
122
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
123
+ value for `lowercase` (as in the original ELECTRA).
124
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
125
+ The prefix for subwords.
126
+ """
127
+
128
+ vocab_files_names = VOCAB_FILES_NAMES
129
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
130
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
131
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
132
+ slow_tokenizer_class = ElectraTokenizer
133
+
134
+ def __init__(
135
+ self,
136
+ vocab_file=None,
137
+ tokenizer_file=None,
138
+ do_lower_case=True,
139
+ unk_token="[UNK]",
140
+ sep_token="[SEP]",
141
+ pad_token="[PAD]",
142
+ cls_token="[CLS]",
143
+ mask_token="[MASK]",
144
+ tokenize_chinese_chars=True,
145
+ strip_accents=None,
146
+ **kwargs,
147
+ ):
148
+ super().__init__(
149
+ vocab_file,
150
+ tokenizer_file=tokenizer_file,
151
+ do_lower_case=do_lower_case,
152
+ unk_token=unk_token,
153
+ sep_token=sep_token,
154
+ pad_token=pad_token,
155
+ cls_token=cls_token,
156
+ mask_token=mask_token,
157
+ tokenize_chinese_chars=tokenize_chinese_chars,
158
+ strip_accents=strip_accents,
159
+ **kwargs,
160
+ )
161
+
162
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
163
+ if (
164
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
165
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
166
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
167
+ ):
168
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
169
+ normalizer_state["lowercase"] = do_lower_case
170
+ normalizer_state["strip_accents"] = strip_accents
171
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
172
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
173
+
174
+ self.do_lower_case = do_lower_case
175
+
176
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
177
+ """
178
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
179
+ adding special tokens. An ELECTRA sequence has the following format:
180
+
181
+ - single sequence: `[CLS] X [SEP]`
182
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
183
+
184
+ Args:
185
+ token_ids_0 (`List[int]`):
186
+ List of IDs to which the special tokens will be added.
187
+ token_ids_1 (`List[int]`, *optional*):
188
+ Optional second list of IDs for sequence pairs.
189
+
190
+ Returns:
191
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
192
+ """
193
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
194
+
195
+ if token_ids_1 is not None:
196
+ output += token_ids_1 + [self.sep_token_id]
197
+
198
+ return output
199
+
200
+ def create_token_type_ids_from_sequences(
201
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
202
+ ) -> List[int]:
203
+ """
204
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An ELECTRA sequence
205
+ pair mask has the following format:
206
+
207
+ ```
208
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
209
+ | first sequence | second sequence |
210
+ ```
211
+
212
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
213
+
214
+ Args:
215
+ token_ids_0 (`List[int]`):
216
+ List of IDs.
217
+ token_ids_1 (`List[int]`, *optional*):
218
+ Optional second list of IDs for sequence pairs.
219
+
220
+ Returns:
221
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
222
+ """
223
+ sep = [self.sep_token_id]
224
+ cls = [self.cls_token_id]
225
+ if token_ids_1 is None:
226
+ return len(cls + token_ids_0 + sep) * [0]
227
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
228
+
229
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
230
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
231
+ return tuple(files)
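A short usage sketch for the fast tokenizer defined above; it assumes `transformers` is installed and that the `google/electra-small-discriminator` checkpoint listed in the maps above can be downloaded or is already cached.

```python
from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
enc = tok("first sequence", "second sequence")
print(enc["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second
```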
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+
23
+ try:
24
+ if not is_sentencepiece_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
30
+
31
+ if TYPE_CHECKING:
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ from .tokenization_mluke import MLukeTokenizer
39
+
40
+
41
+ else:
42
+ import sys
43
+
44
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
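In practice the lazy-import guard above means `MLukeTokenizer` only resolves when `sentencepiece` is installed; a minimal sketch of the same check done explicitly:

```python
from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers import MLukeTokenizer  # resolved lazily at this point
else:
    print("Install `sentencepiece` to use MLukeTokenizer")
```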
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (687 Bytes).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/convert_mluke_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.71 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/__pycache__/tokenization_mluke.cpython-310.pyc ADDED
Binary file (50 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,229 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert mLUKE checkpoint."""
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+ from collections import OrderedDict
21
+
22
+ import torch
23
+
24
+ from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
25
+ from transformers.tokenization_utils_base import AddedToken
26
+
27
+
28
+ @torch.no_grad()
29
+ def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
30
+ # Load configuration defined in the metadata file
31
+ with open(metadata_path) as metadata_file:
32
+ metadata = json.load(metadata_file)
33
+ config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
34
+
35
+ # Load in the weights from the checkpoint_path
36
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
37
+
38
+ # Load the entity vocab file
39
+ entity_vocab = load_original_entity_vocab(entity_vocab_path)
40
+ # add an entry for [MASK2]
41
+ entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
42
+ config.entity_vocab_size += 1
43
+
44
+ tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
45
+
46
+ # Add special tokens to the token vocabulary for downstream tasks
47
+ entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
48
+ entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
49
+ tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
50
+ config.vocab_size += 2
51
+
52
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
53
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
54
+ with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
55
+ tokenizer_config = json.load(f)
56
+ tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
57
+ with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
58
+ json.dump(tokenizer_config, f)
59
+
60
+ with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
61
+ json.dump(entity_vocab, f)
62
+
63
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
64
+
65
+ # Initialize the embeddings of the special tokens
66
+ ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
67
+ ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
68
+
69
+ word_emb = state_dict["embeddings.word_embeddings.weight"]
70
+ ent_emb = word_emb[ent_init_index].unsqueeze(0)
71
+ ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
72
+ state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
73
+ # add special tokens for 'entity_predictions.bias'
74
+ for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
75
+ decoder_bias = state_dict[bias_name]
76
+ ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
77
+ ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
78
+ state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
79
+
80
+ # Initialize the query layers of the entity-aware self-attention mechanism
81
+ for layer_index in range(config.num_hidden_layers):
82
+ for matrix_name in ["query.weight", "query.bias"]:
83
+ prefix = f"encoder.layer.{layer_index}.attention.self."
84
+ state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
85
+ state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
86
+ state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
87
+
88
+ # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
89
+ entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
90
+ entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
91
+ state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
92
+ # add [MASK2] for 'entity_predictions.bias'
93
+ entity_prediction_bias = state_dict["entity_predictions.bias"]
94
+ entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
95
+ state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
96
+
97
+ model = LukeForMaskedLM(config=config).eval()
98
+
99
+ state_dict.pop("entity_predictions.decoder.weight")
100
+ state_dict.pop("lm_head.decoder.weight")
101
+ state_dict.pop("lm_head.decoder.bias")
102
+ state_dict_for_hugging_face = OrderedDict()
103
+ for key, value in state_dict.items():
104
+ if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
105
+ state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
106
+ else:
107
+ state_dict_for_hugging_face[key] = state_dict[key]
108
+
109
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
110
+
111
+ if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
112
+ raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
113
+ if set(missing_keys) != {
114
+ "lm_head.decoder.weight",
115
+ "lm_head.decoder.bias",
116
+ "entity_predictions.decoder.weight",
117
+ }:
118
+ raise ValueError(f"Unexpected missing_keys: {missing_keys}")
119
+
120
+ model.tie_weights()
121
+ assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
122
+ assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
123
+
124
+ # Check outputs
125
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
126
+
127
+ text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
128
+ span = (0, 9)
129
+ encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
130
+
131
+ outputs = model(**encoding)
132
+
133
+ # Verify word hidden states
134
+ if model_size == "large":
135
+ raise NotImplementedError
136
+ else: # base
137
+ expected_shape = torch.Size((1, 33, 768))
138
+ expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
139
+
140
+ if not (outputs.last_hidden_state.shape == expected_shape):
141
+ raise ValueError(
142
+ f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
143
+ )
144
+ if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
145
+ raise ValueError
146
+
147
+ # Verify entity hidden states
148
+ if model_size == "large":
149
+ raise NotImplementedError
150
+ else: # base
151
+ expected_shape = torch.Size((1, 1, 768))
152
+ expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
153
+
154
+ if not (outputs.entity_last_hidden_state.shape == expected_shape):
155
+ raise ValueError(
156
+ f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
157
+ f" {expected_shape}"
158
+ )
159
+ if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
160
+ raise ValueError
161
+
162
+ # Verify masked word/entity prediction
163
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
164
+ text = "Tokyo is the capital of <mask>."
165
+ span = (24, 30)
166
+ encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
167
+
168
+ outputs = model(**encoding)
169
+
170
+ input_ids = encoding["input_ids"][0].tolist()
171
+ mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
172
+ predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
173
+ assert "Japan" == tokenizer.decode(predicted_id)
174
+
175
+ predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
176
+ multilingual_predicted_entities = [
177
+ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
178
+ ]
179
+ assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
180
+
181
+ # Finally, save our PyTorch model and tokenizer
182
+ print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
183
+ model.save_pretrained(pytorch_dump_folder_path)
184
+
185
+
186
+ def load_original_entity_vocab(entity_vocab_path):
187
+ SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
188
+
189
+ data = [json.loads(line) for line in open(entity_vocab_path)]
190
+
191
+ new_mapping = {}
192
+ for entry in data:
193
+ entity_id = entry["id"]
194
+ for entity_name, language in entry["entities"]:
195
+ if entity_name in SPECIAL_TOKENS:
196
+ new_mapping[entity_name] = entity_id
197
+ break
198
+ new_entity_name = f"{language}:{entity_name}"
199
+ new_mapping[new_entity_name] = entity_id
200
+ return new_mapping
201
+
202
+
203
+ if __name__ == "__main__":
204
+ parser = argparse.ArgumentParser()
205
+ # Required parameters
206
+ parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
207
+ parser.add_argument(
208
+ "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
209
+ )
210
+ parser.add_argument(
211
+ "--entity_vocab_path",
212
+ default=None,
213
+ type=str,
214
+ help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
215
+ )
216
+ parser.add_argument(
217
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
218
+ )
219
+ parser.add_argument(
220
+ "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
221
+ )
222
+ args = parser.parse_args()
223
+ convert_luke_checkpoint(
224
+ args.checkpoint_path,
225
+ args.metadata_path,
226
+ args.entity_vocab_path,
227
+ args.pytorch_dump_folder_path,
228
+ args.model_size,
229
+ )
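A hypothetical invocation of the converter defined above, calling the function directly instead of the CLI; all paths are placeholders for files you must already have from the original mLUKE release.

```python
from transformers.models.mluke.convert_mluke_original_pytorch_checkpoint_to_pytorch import (
    convert_luke_checkpoint,
)

convert_luke_checkpoint(
    checkpoint_path="mluke_base/pytorch_model.bin",     # placeholder path
    metadata_path="mluke_base/metadata.json",           # placeholder path
    entity_vocab_path="mluke_base/entity_vocab.jsonl",  # placeholder path
    pytorch_dump_folder_path="converted_mluke_base",
    model_size="base",
)
```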
env-llmeval/lib/python3.10/site-packages/transformers/models/mluke/tokenization_mluke.py ADDED
@@ -0,0 +1,1631 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Studio Ousia and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for mLUKE."""
16
+
17
+
18
+ import itertools
19
+ import json
20
+ import os
21
+ from collections.abc import Mapping
22
+ from shutil import copyfile
23
+ from typing import Any, Dict, List, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import sentencepiece as spm
27
+
28
+ from ...tokenization_utils import PreTrainedTokenizer
29
+ from ...tokenization_utils_base import (
30
+ ENCODE_KWARGS_DOCSTRING,
31
+ AddedToken,
32
+ BatchEncoding,
33
+ EncodedInput,
34
+ PaddingStrategy,
35
+ TensorType,
36
+ TextInput,
37
+ TextInputPair,
38
+ TruncationStrategy,
39
+ to_py_obj,
40
+ )
41
+ from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ EntitySpan = Tuple[int, int]
47
+ EntitySpanInput = List[EntitySpan]
48
+ Entity = str
49
+ EntityInput = List[Entity]
50
+
51
+ SPIECE_UNDERLINE = "▁"
52
+
53
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "entity_vocab_file": "entity_vocab.json"}
54
+
55
+ PRETRAINED_VOCAB_FILES_MAP = {
56
+ "vocab_file": {
57
+ "studio-ousia/mluke-base": "https://huggingface.co/studio-ousia/mluke-base/resolve/main/vocab.json",
58
+ },
59
+ "merges_file": {
60
+ "studio-ousia/mluke-base": "https://huggingface.co/studio-ousia/mluke-base/resolve/main/merges.txt",
61
+ },
62
+ "entity_vocab_file": {
63
+ "studio-ousia/mluke-base": "https://huggingface.co/studio-ousia/mluke-base/resolve/main/entity_vocab.json",
64
+ },
65
+ }
66
+
67
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
68
+ "studio-ousia/mluke-base": 512,
69
+ }
70
+
71
+ ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
72
+ return_token_type_ids (`bool`, *optional*):
73
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
74
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
75
+
76
+ [What are token type IDs?](../glossary#token-type-ids)
77
+ return_attention_mask (`bool`, *optional*):
78
+ Whether to return the attention mask. If left to the default, will return the attention mask according
79
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
80
+
81
+ [What are attention masks?](../glossary#attention-mask)
82
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
83
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
84
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
85
+ of returning overflowing tokens.
86
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
87
+ Whether or not to return special tokens mask information.
88
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
89
+ Whether or not to return `(char_start, char_end)` for each token.
90
+
91
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
92
+ Python's tokenizer, this method will raise `NotImplementedError`.
93
+ return_length (`bool`, *optional*, defaults to `False`):
94
+ Whether or not to return the lengths of the encoded inputs.
95
+ verbose (`bool`, *optional*, defaults to `True`):
96
+ Whether or not to print more information and warnings.
97
+ **kwargs: passed to the `self.tokenize()` method
98
+
99
+ Return:
100
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
101
+
102
+ - **input_ids** -- List of token ids to be fed to a model.
103
+
104
+ [What are input IDs?](../glossary#input-ids)
105
+
106
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
107
+ if *"token_type_ids"* is in `self.model_input_names`).
108
+
109
+ [What are token type IDs?](../glossary#token-type-ids)
110
+
111
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
112
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
113
+
114
+ [What are attention masks?](../glossary#attention-mask)
115
+
116
+ - **entity_ids** -- List of entity ids to be fed to a model.
117
+
118
+ [What are input IDs?](../glossary#input-ids)
119
+
120
+ - **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model.
121
+
122
+ - **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when
123
+ `return_token_type_ids=True` or if *"entity_token_type_ids"* is in `self.model_input_names`).
124
+
125
+ [What are token type IDs?](../glossary#token-type-ids)
126
+
127
+ - **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model
128
+ (when `return_attention_mask=True` or if *"entity_attention_mask"* is in `self.model_input_names`).
129
+
130
+ [What are attention masks?](../glossary#attention-mask)
131
+
132
+ - **entity_start_positions** -- List of the start positions of entities in the word token sequence (when
133
+ `task="entity_span_classification"`).
134
+ - **entity_end_positions** -- List of the end positions of entities in the word token sequence (when
135
+ `task="entity_span_classification"`).
136
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
137
+ `return_overflowing_tokens=True`).
138
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
139
+ `return_overflowing_tokens=True`).
140
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
141
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
142
+ - **length** -- The length of the inputs (when `return_length=True`)
143
+
144
+ """
145
+
146
+
147
+ class MLukeTokenizer(PreTrainedTokenizer):
148
+ """
149
+ Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on
150
+ [SentencePiece](https://github.com/google/sentencepiece).
151
+
152
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
153
+ this superclass for more information regarding those methods.
154
+
155
+ Args:
156
+ vocab_file (`str`):
157
+ Path to the vocabulary file.
158
+ entity_vocab_file (`str`):
159
+ Path to the entity vocabulary file.
160
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
161
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
162
+
163
+ <Tip>
164
+
165
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
166
+ sequence. The token used is the `cls_token`.
167
+
168
+ </Tip>
169
+
170
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
171
+ The end of sequence token.
172
+
173
+ <Tip>
174
+
175
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
176
+ The token used is the `sep_token`.
177
+
178
+ </Tip>
179
+
180
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
181
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
182
+ sequence classification or for a text and a question for question answering. It is also used as the last
183
+ token of a sequence built with special tokens.
184
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
185
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
186
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
187
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
188
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
189
+ token instead.
190
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
191
+ The token used for padding, for example when batching sequences of different lengths.
192
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
193
+ The token used for masking values. This is the token used when training this model with masked language
194
+ modeling. This is the token which the model will try to predict.
195
+ task (`str`, *optional*):
196
+ Task for which you want to prepare sequences. One of `"entity_classification"`,
197
+ `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
198
+ sequence is automatically created based on the given entity span(s).
199
+ max_entity_length (`int`, *optional*, defaults to 32):
200
+ The maximum length of `entity_ids`.
201
+ max_mention_length (`int`, *optional*, defaults to 30):
202
+ The maximum number of tokens inside an entity span.
203
+ entity_token_1 (`str`, *optional*, defaults to `<ent>`):
204
+ The special token used to represent an entity span in a word token sequence. This token is only used when
205
+ `task` is set to `"entity_classification"` or `"entity_pair_classification"`.
206
+ entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
207
+ The special token used to represent an entity span in a word token sequence. This token is only used when
208
+ `task` is set to `"entity_pair_classification"`.
209
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
210
+ Additional special tokens used by the tokenizer.
211
+ sp_model_kwargs (`dict`, *optional*):
212
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
213
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
214
+ to set:
215
+
216
+ - `enable_sampling`: Enable subword regularization.
217
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
218
+
219
+ - `nbest_size = {0,1}`: No sampling is performed.
220
+ - `nbest_size > 1`: samples from the nbest_size results.
221
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
222
+ using forward-filtering-and-backward-sampling algorithm.
223
+
224
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
225
+ BPE-dropout.
226
+
227
+ Attributes:
228
+ sp_model (`SentencePieceProcessor`):
229
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
230
+ """
231
+
232
+ vocab_files_names = VOCAB_FILES_NAMES
233
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
234
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
235
+ model_input_names = ["input_ids", "attention_mask"]
236
+
237
+ def __init__(
238
+ self,
239
+ vocab_file,
240
+ entity_vocab_file,
241
+ bos_token="<s>",
242
+ eos_token="</s>",
243
+ sep_token="</s>",
244
+ cls_token="<s>",
245
+ unk_token="<unk>",
246
+ pad_token="<pad>",
247
+ mask_token="<mask>",
248
+ task=None,
249
+ max_entity_length=32,
250
+ max_mention_length=30,
251
+ entity_token_1="<ent>",
252
+ entity_token_2="<ent2>",
253
+ entity_unk_token="[UNK]",
254
+ entity_pad_token="[PAD]",
255
+ entity_mask_token="[MASK]",
256
+ entity_mask2_token="[MASK2]",
257
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
258
+ **kwargs,
259
+ ) -> None:
260
+ # Mask token behaves like a normal word, i.e. includes the space before it
261
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
262
+
263
+ # we add 2 special tokens for downstream tasks
264
+ # for more information about lstrip and rstrip, see https://github.com/huggingface/transformers/pull/2778
265
+ entity_token_1 = (
266
+ AddedToken(entity_token_1, lstrip=False, rstrip=False)
267
+ if isinstance(entity_token_1, str)
268
+ else entity_token_1
269
+ )
270
+ entity_token_2 = (
271
+ AddedToken(entity_token_2, lstrip=False, rstrip=False)
272
+ if isinstance(entity_token_2, str)
273
+ else entity_token_2
274
+ )
275
+ additional_special_tokens = kwargs.pop("additional_special_tokens", [])
276
+ additional_special_tokens += [entity_token_1, entity_token_2]
277
+
278
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
279
+
280
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
281
+ self.sp_model.Load(str(vocab_file))
282
+ self.vocab_file = vocab_file
283
+
284
+ # Original fairseq vocab and spm vocab must be "aligned":
285
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
286
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
287
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
288
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
289
+
290
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
291
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
292
+
293
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
294
+ self.fairseq_offset = 1
295
+
296
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
297
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
298
+
299
+ with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle:
300
+ self.entity_vocab = json.load(entity_vocab_handle)
301
+ for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
302
+ if entity_special_token not in self.entity_vocab:
303
+ raise ValueError(
304
+ f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. "
305
+ f"Probably an incorrect entity vocab file is loaded: {entity_vocab_file}."
306
+ )
307
+ self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
308
+ self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
309
+ self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
310
+ self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]
311
+
312
+ self.task = task
313
+ if task is None or task == "entity_span_classification":
314
+ self.max_entity_length = max_entity_length
315
+ elif task == "entity_classification":
316
+ self.max_entity_length = 1
317
+ elif task == "entity_pair_classification":
318
+ self.max_entity_length = 2
319
+ else:
320
+ raise ValueError(
321
+ f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification',"
322
+ " 'entity_span_classification'] only."
323
+ )
324
+
325
+ self.max_mention_length = max_mention_length
326
+
327
+ super().__init__(
328
+ bos_token=bos_token,
329
+ eos_token=eos_token,
330
+ unk_token=unk_token,
331
+ sep_token=sep_token,
332
+ cls_token=cls_token,
333
+ pad_token=pad_token,
334
+ mask_token=mask_token,
335
+ sp_model_kwargs=self.sp_model_kwargs,
336
+ task=task,
337
+ max_entity_length=max_entity_length,
338
+ max_mention_length=max_mention_length,
339
+ entity_token_1=entity_token_1,
340
+ entity_token_2=entity_token_2,
341
+ entity_unk_token=entity_unk_token,
342
+ entity_pad_token=entity_pad_token,
343
+ entity_mask_token=entity_mask_token,
344
+ entity_mask2_token=entity_mask2_token,
345
+ additional_special_tokens=additional_special_tokens,
346
+ **kwargs,
347
+ )
348
+
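A short usage sketch for this tokenizer; it assumes `sentencepiece` is installed and the `studio-ousia/mluke-base` checkpoint referenced above is available, and the sentence and span are illustrative only.

```python
from transformers import MLukeTokenizer

tok = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
text = "Tokyo is the capital of Japan."
encoding = tok(text, entity_spans=[(0, 5)], return_tensors="pt")  # span covers "Tokyo"
print(encoding["input_ids"].shape, encoding["entity_ids"].shape)
```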
349
+ @property
350
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.vocab_size
351
+ def vocab_size(self):
352
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
353
+
354
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_vocab
355
+ def get_vocab(self):
356
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
357
+ vocab.update(self.added_tokens_encoder)
358
+ return vocab
359
+
360
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._tokenize
361
+ def _tokenize(self, text: str) -> List[str]:
362
+ # TODO check if the t5/llama PR also applies here
363
+ return self.sp_model.encode(text, out_type=str)
364
+
365
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._convert_token_to_id
366
+ def _convert_token_to_id(self, token):
367
+ """Converts a token (str) in an id using the vocab."""
368
+ if token in self.fairseq_tokens_to_ids:
369
+ return self.fairseq_tokens_to_ids[token]
370
+ spm_id = self.sp_model.PieceToId(token)
371
+
372
+ # Need to return unknown token if the SP model returned 0
373
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
374
+
375
+ def _convert_id_to_token(self, index):
376
+ """Converts an index (integer) in a token (str) using the vocab."""
377
+ if index in self.fairseq_ids_to_tokens:
378
+ return self.fairseq_ids_to_tokens[index]
379
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
380
+
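A toy illustration of the fairseq/spm offset bookkeeping above, with a fake SentencePiece lookup in place of a real model:

```python
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
fake_spm = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # piece -> spm id

def token_to_id(token, unk_token_id=3):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = fake_spm.get(token, 0)
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert token_to_id("<pad>") == 1  # fairseq special token, no offset
assert token_to_id(",") == 4      # spm id 3 + offset 1, as in the table above
assert token_to_id("oov") == 3    # unknown pieces fall back to <unk>
```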
381
+ def convert_tokens_to_string(self, tokens):
382
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
383
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
384
+ return out_string
385
+
386
+ def __getstate__(self):
387
+ state = self.__dict__.copy()
388
+ state["sp_model"] = None
389
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
390
+ return state
391
+
392
+ def __setstate__(self, d):
393
+ self.__dict__ = d
394
+
395
+ # for backward compatibility
396
+ if not hasattr(self, "sp_model_kwargs"):
397
+ self.sp_model_kwargs = {}
398
+
399
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
400
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
401
+
402
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
403
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.__call__
404
+ def __call__(
405
+ self,
406
+ text: Union[TextInput, List[TextInput]],
407
+ text_pair: Optional[Union[TextInput, List[TextInput]]] = None,
408
+ entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
409
+ entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
410
+ entities: Optional[Union[EntityInput, List[EntityInput]]] = None,
411
+ entities_pair: Optional[Union[EntityInput, List[EntityInput]]] = None,
412
+ add_special_tokens: bool = True,
413
+ padding: Union[bool, str, PaddingStrategy] = False,
414
+ truncation: Union[bool, str, TruncationStrategy] = None,
415
+ max_length: Optional[int] = None,
416
+ max_entity_length: Optional[int] = None,
417
+ stride: int = 0,
418
+ is_split_into_words: Optional[bool] = False,
419
+ pad_to_multiple_of: Optional[int] = None,
420
+ return_tensors: Optional[Union[str, TensorType]] = None,
421
+ return_token_type_ids: Optional[bool] = None,
422
+ return_attention_mask: Optional[bool] = None,
423
+ return_overflowing_tokens: bool = False,
424
+ return_special_tokens_mask: bool = False,
425
+ return_offsets_mapping: bool = False,
426
+ return_length: bool = False,
427
+ verbose: bool = True,
428
+ **kwargs,
429
+ ) -> BatchEncoding:
430
+ """
431
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
432
+ sequences, depending on the task you want to prepare them for.
433
+
434
+ Args:
435
+ text (`str`, `List[str]`, `List[List[str]]`):
436
+ The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
437
+ tokenizer does not support tokenization based on pretokenized strings.
438
+ text_pair (`str`, `List[str]`, `List[List[str]]`):
439
+ The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
440
+ tokenizer does not support tokenization based on pretokenized strings.
441
+ entity_spans (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):
442
+ The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
443
+ with two integers denoting character-based start and end positions of entities. If you specify
444
+ `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor,
445
+ the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each
446
+ sequence must be equal to the length of each sequence of `entities`.
447
+ entity_spans_pair (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):
448
+ The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
449
+ with two integers denoting character-based start and end positions of entities. If you specify the
450
+ `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
451
+ length of each sequence must be equal to the length of each sequence of `entities_pair`.
452
+ entities (`List[str]`, `List[List[str]]`, *optional*):
453
+ The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
454
+ representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
455
+ Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
456
+ each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
457
+ `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
458
+ is automatically constructed by filling it with the [MASK] entity.
459
+ entities_pair (`List[str]`, `List[List[str]]`, *optional*):
460
+ The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
461
+ representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
462
+ Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
463
+ each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
464
+ `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
465
+ sequences is automatically constructed by filling it with the [MASK] entity.
466
+ max_entity_length (`int`, *optional*):
467
+ The maximum length of `entity_ids`.
468
+ """
469
+ # Input type checking for clearer error
470
+ is_valid_single_text = isinstance(text, str)
471
+ is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or (isinstance(text[0], str)))
472
+ if not (is_valid_single_text or is_valid_batch_text):
473
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch).")
474
+
475
+ is_valid_single_text_pair = isinstance(text_pair, str)
476
+ is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
477
+ len(text_pair) == 0 or isinstance(text_pair[0], str)
478
+ )
479
+ if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
480
+ raise ValueError("text_pair input must be of type `str` (single example) or `List[str]` (batch).")
481
+
482
+ is_batched = bool(isinstance(text, (list, tuple)))
483
+
484
+ if is_batched:
485
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
486
+ if entities is None:
487
+ batch_entities_or_entities_pairs = None
488
+ else:
489
+ batch_entities_or_entities_pairs = (
490
+ list(zip(entities, entities_pair)) if entities_pair is not None else entities
491
+ )
492
+
493
+ if entity_spans is None:
494
+ batch_entity_spans_or_entity_spans_pairs = None
495
+ else:
496
+ batch_entity_spans_or_entity_spans_pairs = (
497
+ list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
498
+ )
499
+
500
+ return self.batch_encode_plus(
501
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
502
+ batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
503
+ batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
504
+ add_special_tokens=add_special_tokens,
505
+ padding=padding,
506
+ truncation=truncation,
507
+ max_length=max_length,
508
+ max_entity_length=max_entity_length,
509
+ stride=stride,
510
+ is_split_into_words=is_split_into_words,
511
+ pad_to_multiple_of=pad_to_multiple_of,
512
+ return_tensors=return_tensors,
513
+ return_token_type_ids=return_token_type_ids,
514
+ return_attention_mask=return_attention_mask,
515
+ return_overflowing_tokens=return_overflowing_tokens,
516
+ return_special_tokens_mask=return_special_tokens_mask,
517
+ return_offsets_mapping=return_offsets_mapping,
518
+ return_length=return_length,
519
+ verbose=verbose,
520
+ **kwargs,
521
+ )
522
+ else:
523
+ return self.encode_plus(
524
+ text=text,
525
+ text_pair=text_pair,
526
+ entity_spans=entity_spans,
527
+ entity_spans_pair=entity_spans_pair,
528
+ entities=entities,
529
+ entities_pair=entities_pair,
530
+ add_special_tokens=add_special_tokens,
531
+ padding=padding,
532
+ truncation=truncation,
533
+ max_length=max_length,
534
+ max_entity_length=max_entity_length,
535
+ stride=stride,
536
+ is_split_into_words=is_split_into_words,
537
+ pad_to_multiple_of=pad_to_multiple_of,
538
+ return_tensors=return_tensors,
539
+ return_token_type_ids=return_token_type_ids,
540
+ return_attention_mask=return_attention_mask,
541
+ return_overflowing_tokens=return_overflowing_tokens,
542
+ return_special_tokens_mask=return_special_tokens_mask,
543
+ return_offsets_mapping=return_offsets_mapping,
544
+ return_length=return_length,
545
+ verbose=verbose,
546
+ **kwargs,
547
+ )
548
+
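
A usage sketch for the `__call__` path above; the checkpoint name is an assumption, and the character spans point at "Beyoncé" and "Los Angeles":

from transformers import MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")  # assumed checkpoint

text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7), (17, 28)]  # character-level spans

# Without `entities`, both spans are encoded with the [MASK] entity id.
encoding = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
print(sorted(encoding.keys()))
# Expected keys include input_ids, attention_mask, entity_ids,
# entity_position_ids and entity_attention_mask.
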
549
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._encode_plus
550
+ def _encode_plus(
551
+ self,
552
+ text: Union[TextInput],
553
+ text_pair: Optional[Union[TextInput]] = None,
554
+ entity_spans: Optional[EntitySpanInput] = None,
555
+ entity_spans_pair: Optional[EntitySpanInput] = None,
556
+ entities: Optional[EntityInput] = None,
557
+ entities_pair: Optional[EntityInput] = None,
558
+ add_special_tokens: bool = True,
559
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
560
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
561
+ max_length: Optional[int] = None,
562
+ max_entity_length: Optional[int] = None,
563
+ stride: int = 0,
564
+ is_split_into_words: Optional[bool] = False,
565
+ pad_to_multiple_of: Optional[int] = None,
566
+ return_tensors: Optional[Union[str, TensorType]] = None,
567
+ return_token_type_ids: Optional[bool] = None,
568
+ return_attention_mask: Optional[bool] = None,
569
+ return_overflowing_tokens: bool = False,
570
+ return_special_tokens_mask: bool = False,
571
+ return_offsets_mapping: bool = False,
572
+ return_length: bool = False,
573
+ verbose: bool = True,
574
+ **kwargs,
575
+ ) -> BatchEncoding:
576
+ if return_offsets_mapping:
577
+ raise NotImplementedError(
578
+ "return_offset_mapping is not available when using Python tokenizers. "
579
+ "To use this feature, change your tokenizer to one deriving from "
580
+ "transformers.PreTrainedTokenizerFast. "
581
+ "More information on available tokenizers at "
582
+ "https://github.com/huggingface/transformers/pull/2674"
583
+ )
584
+
585
+ if is_split_into_words:
586
+ raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
587
+
588
+ (
589
+ first_ids,
590
+ second_ids,
591
+ first_entity_ids,
592
+ second_entity_ids,
593
+ first_entity_token_spans,
594
+ second_entity_token_spans,
595
+ ) = self._create_input_sequence(
596
+ text=text,
597
+ text_pair=text_pair,
598
+ entities=entities,
599
+ entities_pair=entities_pair,
600
+ entity_spans=entity_spans,
601
+ entity_spans_pair=entity_spans_pair,
602
+ **kwargs,
603
+ )
604
+
605
+ # prepare_for_model will create the attention_mask and token_type_ids
606
+ return self.prepare_for_model(
607
+ first_ids,
608
+ pair_ids=second_ids,
609
+ entity_ids=first_entity_ids,
610
+ pair_entity_ids=second_entity_ids,
611
+ entity_token_spans=first_entity_token_spans,
612
+ pair_entity_token_spans=second_entity_token_spans,
613
+ add_special_tokens=add_special_tokens,
614
+ padding=padding_strategy.value,
615
+ truncation=truncation_strategy.value,
616
+ max_length=max_length,
617
+ max_entity_length=max_entity_length,
618
+ stride=stride,
619
+ pad_to_multiple_of=pad_to_multiple_of,
620
+ return_tensors=return_tensors,
621
+ prepend_batch_axis=True,
622
+ return_attention_mask=return_attention_mask,
623
+ return_token_type_ids=return_token_type_ids,
624
+ return_overflowing_tokens=return_overflowing_tokens,
625
+ return_special_tokens_mask=return_special_tokens_mask,
626
+ return_length=return_length,
627
+ verbose=verbose,
628
+ )
629
+
630
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_encode_plus
631
+ def _batch_encode_plus(
632
+ self,
633
+ batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]],
634
+ batch_entity_spans_or_entity_spans_pairs: Optional[
635
+ Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]]
636
+ ] = None,
637
+ batch_entities_or_entities_pairs: Optional[
638
+ Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]]
639
+ ] = None,
640
+ add_special_tokens: bool = True,
641
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
642
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
643
+ max_length: Optional[int] = None,
644
+ max_entity_length: Optional[int] = None,
645
+ stride: int = 0,
646
+ is_split_into_words: Optional[bool] = False,
647
+ pad_to_multiple_of: Optional[int] = None,
648
+ return_tensors: Optional[Union[str, TensorType]] = None,
649
+ return_token_type_ids: Optional[bool] = None,
650
+ return_attention_mask: Optional[bool] = None,
651
+ return_overflowing_tokens: bool = False,
652
+ return_special_tokens_mask: bool = False,
653
+ return_offsets_mapping: bool = False,
654
+ return_length: bool = False,
655
+ verbose: bool = True,
656
+ **kwargs,
657
+ ) -> BatchEncoding:
658
+ if return_offsets_mapping:
659
+ raise NotImplementedError(
660
+ "return_offset_mapping is not available when using Python tokenizers. "
661
+ "To use this feature, change your tokenizer to one deriving from "
662
+ "transformers.PreTrainedTokenizerFast."
663
+ )
664
+
665
+ if is_split_into_words:
666
+ raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
667
+
668
+ # input_ids is a list of tuples (one for each example in the batch)
669
+ input_ids = []
670
+ entity_ids = []
671
+ entity_token_spans = []
672
+ for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
673
+ if not isinstance(text_or_text_pair, (list, tuple)):
674
+ text, text_pair = text_or_text_pair, None
675
+ else:
676
+ text, text_pair = text_or_text_pair
677
+
678
+ entities, entities_pair = None, None
679
+ if batch_entities_or_entities_pairs is not None:
680
+ entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
681
+ if entities_or_entities_pairs:
682
+ if isinstance(entities_or_entities_pairs[0], str):
683
+ entities, entities_pair = entities_or_entities_pairs, None
684
+ else:
685
+ entities, entities_pair = entities_or_entities_pairs
686
+
687
+ entity_spans, entity_spans_pair = None, None
688
+ if batch_entity_spans_or_entity_spans_pairs is not None:
689
+ entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
690
+ if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(
691
+ entity_spans_or_entity_spans_pairs[0], list
692
+ ):
693
+ entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
694
+ else:
695
+ entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
696
+
697
+ (
698
+ first_ids,
699
+ second_ids,
700
+ first_entity_ids,
701
+ second_entity_ids,
702
+ first_entity_token_spans,
703
+ second_entity_token_spans,
704
+ ) = self._create_input_sequence(
705
+ text=text,
706
+ text_pair=text_pair,
707
+ entities=entities,
708
+ entities_pair=entities_pair,
709
+ entity_spans=entity_spans,
710
+ entity_spans_pair=entity_spans_pair,
711
+ **kwargs,
712
+ )
713
+ input_ids.append((first_ids, second_ids))
714
+ entity_ids.append((first_entity_ids, second_entity_ids))
715
+ entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
716
+
717
+ batch_outputs = self._batch_prepare_for_model(
718
+ input_ids,
719
+ batch_entity_ids_pairs=entity_ids,
720
+ batch_entity_token_spans_pairs=entity_token_spans,
721
+ add_special_tokens=add_special_tokens,
722
+ padding_strategy=padding_strategy,
723
+ truncation_strategy=truncation_strategy,
724
+ max_length=max_length,
725
+ max_entity_length=max_entity_length,
726
+ stride=stride,
727
+ pad_to_multiple_of=pad_to_multiple_of,
728
+ return_attention_mask=return_attention_mask,
729
+ return_token_type_ids=return_token_type_ids,
730
+ return_overflowing_tokens=return_overflowing_tokens,
731
+ return_special_tokens_mask=return_special_tokens_mask,
732
+ return_length=return_length,
733
+ return_tensors=return_tensors,
734
+ verbose=verbose,
735
+ )
736
+
737
+ return BatchEncoding(batch_outputs)
738
+
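
The batched path accepts parallel lists of texts and span lists, one span list per text. A hedged sketch, reusing the assumed checkpoint from the earlier example:

from transformers import MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")  # assumed checkpoint

texts = ["Beyoncé lives in Los Angeles.", "Tokyo is the capital of Japan."]
spans = [[(0, 7), (17, 28)], [(0, 5), (24, 29)]]

# padding="longest" pads both the word sequences and the entity sequences.
batch = tokenizer(texts, entity_spans=spans, padding="longest", return_tensors="pt")
print(batch["entity_ids"].shape)  # (2, max number of entities per example)
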
739
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._check_entity_input_format
740
+ def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
741
+ if not isinstance(entity_spans, list):
742
+ raise ValueError("entity_spans should be given as a list")
743
+ elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
744
+ raise ValueError(
745
+ "entity_spans should be given as a list of tuples containing the start and end character indices"
746
+ )
747
+
748
+ if entities is not None:
749
+ if not isinstance(entities, list):
750
+ raise ValueError("If you specify entities, they should be given as a list")
751
+
752
+ if len(entities) > 0 and not isinstance(entities[0], str):
753
+ raise ValueError("If you specify entities, they should be given as a list of entity names")
754
+
755
+ if len(entities) != len(entity_spans):
756
+ raise ValueError("If you specify entities, entities and entity_spans must be the same length")
757
+
758
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._create_input_sequence
759
+ def _create_input_sequence(
760
+ self,
761
+ text: Union[TextInput],
762
+ text_pair: Optional[Union[TextInput]] = None,
763
+ entities: Optional[EntityInput] = None,
764
+ entities_pair: Optional[EntityInput] = None,
765
+ entity_spans: Optional[EntitySpanInput] = None,
766
+ entity_spans_pair: Optional[EntitySpanInput] = None,
767
+ **kwargs,
768
+ ) -> Tuple[list, list, list, list, list, list]:
769
+ def get_input_ids(text):
770
+ tokens = self.tokenize(text, **kwargs)
771
+ return self.convert_tokens_to_ids(tokens)
772
+
773
+ def get_input_ids_and_entity_token_spans(text, entity_spans):
774
+ if entity_spans is None:
775
+ return get_input_ids(text), None
776
+
777
+ cur = 0
778
+ input_ids = []
779
+ entity_token_spans = [None] * len(entity_spans)
780
+
781
+ split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
782
+ char_pos2token_pos = {}
783
+
784
+ for split_char_position in split_char_positions:
785
+ orig_split_char_position = split_char_position
786
+ if (
787
+ split_char_position > 0 and text[split_char_position - 1] == " "
788
+ ): # whitespace should be prepended to the following token
789
+ split_char_position -= 1
790
+ if cur != split_char_position:
791
+ input_ids += get_input_ids(text[cur:split_char_position])
792
+ cur = split_char_position
793
+ char_pos2token_pos[orig_split_char_position] = len(input_ids)
794
+
795
+ input_ids += get_input_ids(text[cur:])
796
+
797
+ entity_token_spans = [
798
+ (char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans
799
+ ]
800
+
801
+ return input_ids, entity_token_spans
802
+
803
+ first_ids, second_ids = None, None
804
+ first_entity_ids, second_entity_ids = None, None
805
+ first_entity_token_spans, second_entity_token_spans = None, None
806
+
807
+ if self.task is None:
808
+ if entity_spans is None:
809
+ first_ids = get_input_ids(text)
810
+ else:
811
+ self._check_entity_input_format(entities, entity_spans)
812
+
813
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
814
+ if entities is None:
815
+ first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
816
+ else:
817
+ first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities]
818
+
819
+ if text_pair is not None:
820
+ if entity_spans_pair is None:
821
+ second_ids = get_input_ids(text_pair)
822
+ else:
823
+ self._check_entity_input_format(entities_pair, entity_spans_pair)
824
+
825
+ second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
826
+ text_pair, entity_spans_pair
827
+ )
828
+ if entities_pair is None:
829
+ second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
830
+ else:
831
+ second_entity_ids = [
832
+ self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair
833
+ ]
834
+
835
+ elif self.task == "entity_classification":
836
+ if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)):
837
+ raise ValueError(
838
+ "Entity spans should be a list containing a single tuple "
839
+ "containing the start and end character indices of an entity"
840
+ )
841
+ first_entity_ids = [self.entity_mask_token_id]
842
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
843
+
844
+ # add special tokens to input ids
845
+ entity_token_start, entity_token_end = first_entity_token_spans[0]
846
+ first_ids = (
847
+ first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
848
+ )
849
+ first_ids = (
850
+ first_ids[:entity_token_start]
851
+ + [self.additional_special_tokens_ids[0]]
852
+ + first_ids[entity_token_start:]
853
+ )
854
+ first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
855
+
856
+ elif self.task == "entity_pair_classification":
857
+ if not (
858
+ isinstance(entity_spans, list)
859
+ and len(entity_spans) == 2
860
+ and isinstance(entity_spans[0], tuple)
861
+ and isinstance(entity_spans[1], tuple)
862
+ ):
863
+ raise ValueError(
864
+ "Entity spans should be provided as a list of two tuples, "
865
+ "each tuple containing the start and end character indices of an entity"
866
+ )
867
+
868
+ head_span, tail_span = entity_spans
869
+ first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
870
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
871
+
872
+ head_token_span, tail_token_span = first_entity_token_spans
873
+ token_span_with_special_token_ids = [
874
+ (head_token_span, self.additional_special_tokens_ids[0]),
875
+ (tail_token_span, self.additional_special_tokens_ids[1]),
876
+ ]
877
+ if head_token_span[0] < tail_token_span[0]:
878
+ first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
879
+ first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
880
+ token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
881
+ else:
882
+ first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
883
+ first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
884
+
885
+ for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
886
+ first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
887
+ first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]
888
+
889
+ elif self.task == "entity_span_classification":
890
+ if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
891
+ raise ValueError(
892
+ "Entity spans should be provided as a list of tuples, "
893
+ "each tuple containing the start and end character indices of an entity"
894
+ )
895
+
896
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
897
+ first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
898
+
899
+ else:
900
+ raise ValueError(f"Task {self.task} not supported")
901
+
902
+ return (
903
+ first_ids,
904
+ second_ids,
905
+ first_entity_ids,
906
+ second_entity_ids,
907
+ first_entity_token_spans,
908
+ second_entity_token_spans,
909
+ )
910
+
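
The heart of `_create_input_sequence` is `get_input_ids_and_entity_token_spans`, which cuts the text at every span boundary, tokenizes each piece separately, and records how character positions map to token positions. A dependency-free sketch of that idea with a toy whitespace tokenizer (all names here are illustrative, not part of the library):

import itertools


def char_spans_to_token_spans(text, entity_spans, tokenize=str.split):
    """Toy version of the character-to-token span mapping used above."""
    cur, tokens, char2tok = 0, [], {}
    for pos in sorted(frozenset(itertools.chain(*entity_spans))):
        # A leading whitespace is kept attached to the following token.
        cut = pos - 1 if pos > 0 and text[pos - 1] == " " else pos
        if cur != cut:
            tokens += tokenize(text[cur:cut])
            cur = cut
        char2tok[pos] = len(tokens)
    tokens += tokenize(text[cur:])
    return tokens, [(char2tok[s], char2tok[e]) for s, e in entity_spans]


tokens, token_spans = char_spans_to_token_spans("Beyoncé lives in Los Angeles.", [(0, 7), (17, 28)])
# tokens      -> ['Beyoncé', 'lives', 'in', 'Los', 'Angeles', '.']
# token_spans -> [(0, 1), (3, 5)]
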
911
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
912
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_prepare_for_model
913
+ def _batch_prepare_for_model(
914
+ self,
915
+ batch_ids_pairs: List[Tuple[List[int], None]],
916
+ batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]],
917
+ batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]],
918
+ add_special_tokens: bool = True,
919
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
920
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
921
+ max_length: Optional[int] = None,
922
+ max_entity_length: Optional[int] = None,
923
+ stride: int = 0,
924
+ pad_to_multiple_of: Optional[int] = None,
925
+ return_tensors: Optional[str] = None,
926
+ return_token_type_ids: Optional[bool] = None,
927
+ return_attention_mask: Optional[bool] = None,
928
+ return_overflowing_tokens: bool = False,
929
+ return_special_tokens_mask: bool = False,
930
+ return_length: bool = False,
931
+ verbose: bool = True,
932
+ ) -> BatchEncoding:
933
+ """
934
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
935
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
936
+ manages a moving window (with a user-defined stride) for overflowing tokens.
937
+
938
+
939
+ Args:
940
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
941
+ batch_entity_ids_pairs: list of entity ids or entity ids pairs
942
+ batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
943
+ max_entity_length: The maximum length of the entity sequence.
944
+ """
945
+
946
+ batch_outputs = {}
947
+ for input_ids, entity_ids, entity_token_span_pairs in zip(
948
+ batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
949
+ ):
950
+ first_ids, second_ids = input_ids
951
+ first_entity_ids, second_entity_ids = entity_ids
952
+ first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
953
+ outputs = self.prepare_for_model(
954
+ first_ids,
955
+ second_ids,
956
+ entity_ids=first_entity_ids,
957
+ pair_entity_ids=second_entity_ids,
958
+ entity_token_spans=first_entity_token_spans,
959
+ pair_entity_token_spans=second_entity_token_spans,
960
+ add_special_tokens=add_special_tokens,
961
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
962
+ truncation=truncation_strategy.value,
963
+ max_length=max_length,
964
+ max_entity_length=max_entity_length,
965
+ stride=stride,
966
+ pad_to_multiple_of=None, # we pad in batch afterward
967
+ return_attention_mask=False, # we pad in batch afterward
968
+ return_token_type_ids=return_token_type_ids,
969
+ return_overflowing_tokens=return_overflowing_tokens,
970
+ return_special_tokens_mask=return_special_tokens_mask,
971
+ return_length=return_length,
972
+ return_tensors=None, # We convert the whole batch to tensors at the end
973
+ prepend_batch_axis=False,
974
+ verbose=verbose,
975
+ )
976
+
977
+ for key, value in outputs.items():
978
+ if key not in batch_outputs:
979
+ batch_outputs[key] = []
980
+ batch_outputs[key].append(value)
981
+
982
+ batch_outputs = self.pad(
983
+ batch_outputs,
984
+ padding=padding_strategy.value,
985
+ max_length=max_length,
986
+ pad_to_multiple_of=pad_to_multiple_of,
987
+ return_attention_mask=return_attention_mask,
988
+ )
989
+
990
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
991
+
992
+ return batch_outputs
993
+
994
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
995
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.prepare_for_model
996
+ def prepare_for_model(
997
+ self,
998
+ ids: List[int],
999
+ pair_ids: Optional[List[int]] = None,
1000
+ entity_ids: Optional[List[int]] = None,
1001
+ pair_entity_ids: Optional[List[int]] = None,
1002
+ entity_token_spans: Optional[List[Tuple[int, int]]] = None,
1003
+ pair_entity_token_spans: Optional[List[Tuple[int, int]]] = None,
1004
+ add_special_tokens: bool = True,
1005
+ padding: Union[bool, str, PaddingStrategy] = False,
1006
+ truncation: Union[bool, str, TruncationStrategy] = None,
1007
+ max_length: Optional[int] = None,
1008
+ max_entity_length: Optional[int] = None,
1009
+ stride: int = 0,
1010
+ pad_to_multiple_of: Optional[int] = None,
1011
+ return_tensors: Optional[Union[str, TensorType]] = None,
1012
+ return_token_type_ids: Optional[bool] = None,
1013
+ return_attention_mask: Optional[bool] = None,
1014
+ return_overflowing_tokens: bool = False,
1015
+ return_special_tokens_mask: bool = False,
1016
+ return_offsets_mapping: bool = False,
1017
+ return_length: bool = False,
1018
+ verbose: bool = True,
1019
+ prepend_batch_axis: bool = False,
1020
+ **kwargs,
1021
+ ) -> BatchEncoding:
1022
+ """
1023
+ Prepares a sequence of input ids, entity ids and entity spans, or a pair of sequences of input ids, entity ids,
1024
+ and entity spans, so that it can be used by the model. It adds special tokens, truncates sequences if overflowing
1025
+ while taking into account the special tokens and manages a moving window (with a user-defined stride) for
1026
+ overflowing tokens. Please note that for *pair_ids* different from `None` and *truncation_strategy = longest_first*
1027
+ or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an
1028
+ error.
1029
+
1030
+ Args:
1031
+ ids (`List[int]`):
1032
+ Tokenized input ids of the first sequence.
1033
+ pair_ids (`List[int]`, *optional*):
1034
+ Tokenized input ids of the second sequence.
1035
+ entity_ids (`List[int]`, *optional*):
1036
+ Entity ids of the first sequence.
1037
+ pair_entity_ids (`List[int]`, *optional*):
1038
+ Entity ids of the second sequence.
1039
+ entity_token_spans (`List[Tuple[int, int]]`, *optional*):
1040
+ Entity spans of the first sequence.
1041
+ pair_entity_token_spans (`List[Tuple[int, int]]`, *optional*):
1042
+ Entity spans of the second sequence.
1043
+ max_entity_length (`int`, *optional*):
1044
+ The maximum length of the entity sequence.
1045
+ """
1046
+
1047
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1048
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1049
+ padding=padding,
1050
+ truncation=truncation,
1051
+ max_length=max_length,
1052
+ pad_to_multiple_of=pad_to_multiple_of,
1053
+ verbose=verbose,
1054
+ **kwargs,
1055
+ )
1056
+
1057
+ # Compute lengths
1058
+ pair = bool(pair_ids is not None)
1059
+ len_ids = len(ids)
1060
+ len_pair_ids = len(pair_ids) if pair else 0
1061
+
1062
+ if return_token_type_ids and not add_special_tokens:
1063
+ raise ValueError(
1064
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1065
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1066
+ "set return_token_type_ids to None."
1067
+ )
1068
+ if (
1069
+ return_overflowing_tokens
1070
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
1071
+ and pair_ids is not None
1072
+ ):
1073
+ raise ValueError(
1074
+ "Not possible to return overflowing tokens for pair of sequences with the "
1075
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
1076
+ "for instance `only_second` or `only_first`."
1077
+ )
1078
+
1079
+ # Load from model defaults
1080
+ if return_token_type_ids is None:
1081
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1082
+ if return_attention_mask is None:
1083
+ return_attention_mask = "attention_mask" in self.model_input_names
1084
+
1085
+ encoded_inputs = {}
1086
+
1087
+ # Compute the total size of the returned word encodings
1088
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
1089
+
1090
+ # Truncation: Handle max sequence length and max_entity_length
1091
+ overflowing_tokens = []
1092
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
1093
+ # truncate words up to max_length
1094
+ ids, pair_ids, overflowing_tokens = self.truncate_sequences(
1095
+ ids,
1096
+ pair_ids=pair_ids,
1097
+ num_tokens_to_remove=total_len - max_length,
1098
+ truncation_strategy=truncation_strategy,
1099
+ stride=stride,
1100
+ )
1101
+
1102
+ if return_overflowing_tokens:
1103
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
1104
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
1105
+
1106
+ # Add special tokens
1107
+ if add_special_tokens:
1108
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1109
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
1110
+ entity_token_offset = 1 # 1 * <s> token
1111
+ pair_entity_token_offset = len(ids) + 3 # 1 * <s> token & 2 * <sep> tokens
1112
+ else:
1113
+ sequence = ids + pair_ids if pair else ids
1114
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
1115
+ entity_token_offset = 0
1116
+ pair_entity_token_offset = len(ids)
1117
+
1118
+ # Build output dictionary
1119
+ encoded_inputs["input_ids"] = sequence
1120
+ if return_token_type_ids:
1121
+ encoded_inputs["token_type_ids"] = token_type_ids
1122
+ if return_special_tokens_mask:
1123
+ if add_special_tokens:
1124
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
1125
+ else:
1126
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1127
+
1128
+ # Set max entity length
1129
+ if not max_entity_length:
1130
+ max_entity_length = self.max_entity_length
1131
+
1132
+ if entity_ids is not None:
1133
+ total_entity_len = 0
1134
+ num_invalid_entities = 0
1135
+ valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
1136
+ valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
1137
+
1138
+ total_entity_len += len(valid_entity_ids)
1139
+ num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
1140
+
1141
+ valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
1142
+ if pair_entity_ids is not None:
1143
+ valid_pair_entity_ids = [
1144
+ ent_id
1145
+ for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
1146
+ if span[1] <= len(pair_ids)
1147
+ ]
1148
+ valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
1149
+ total_entity_len += len(valid_pair_entity_ids)
1150
+ num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
1151
+
1152
+ if num_invalid_entities != 0:
1153
+ logger.warning(
1154
+ f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the"
1155
+ " truncation of input tokens"
1156
+ )
1157
+
1158
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
1159
+ # truncate entities up to max_entity_length
1160
+ valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(
1161
+ valid_entity_ids,
1162
+ pair_ids=valid_pair_entity_ids,
1163
+ num_tokens_to_remove=total_entity_len - max_entity_length,
1164
+ truncation_strategy=truncation_strategy,
1165
+ stride=stride,
1166
+ )
1167
+ valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
1168
+ if valid_pair_entity_token_spans is not None:
1169
+ valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)]
1170
+
1171
+ if return_overflowing_tokens:
1172
+ encoded_inputs["overflowing_entities"] = overflowing_entities
1173
+ encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length
1174
+
1175
+ final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
1176
+ encoded_inputs["entity_ids"] = list(final_entity_ids)
1177
+ entity_position_ids = []
1178
+ entity_start_positions = []
1179
+ entity_end_positions = []
1180
+ for token_spans, offset in (
1181
+ (valid_entity_token_spans, entity_token_offset),
1182
+ (valid_pair_entity_token_spans, pair_entity_token_offset),
1183
+ ):
1184
+ if token_spans is not None:
1185
+ for start, end in token_spans:
1186
+ start += offset
1187
+ end += offset
1188
+ position_ids = list(range(start, end))[: self.max_mention_length]
1189
+ position_ids += [-1] * (self.max_mention_length - end + start)
1190
+ entity_position_ids.append(position_ids)
1191
+ entity_start_positions.append(start)
1192
+ entity_end_positions.append(end - 1)
1193
+
1194
+ encoded_inputs["entity_position_ids"] = entity_position_ids
1195
+ if self.task == "entity_span_classification":
1196
+ encoded_inputs["entity_start_positions"] = entity_start_positions
1197
+ encoded_inputs["entity_end_positions"] = entity_end_positions
1198
+
1199
+ if return_token_type_ids:
1200
+ encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])
1201
+
1202
+ # Check lengths
1203
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
1204
+
1205
+ # Padding
1206
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
1207
+ encoded_inputs = self.pad(
1208
+ encoded_inputs,
1209
+ max_length=max_length,
1210
+ max_entity_length=max_entity_length,
1211
+ padding=padding_strategy.value,
1212
+ pad_to_multiple_of=pad_to_multiple_of,
1213
+ return_attention_mask=return_attention_mask,
1214
+ )
1215
+
1216
+ if return_length:
1217
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1218
+
1219
+ batch_outputs = BatchEncoding(
1220
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
1221
+ )
1222
+
1223
+ return batch_outputs
1224
+
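
Inside `prepare_for_model`, each entity's token span is expanded into one fixed-width row of `entity_position_ids`, clipped to `max_mention_length` and right-padded with -1. A small self-contained sketch of that expansion (the value 30 for `max_mention_length` and the offset of 1 for the leading <s> are assumptions for illustration):

MAX_MENTION_LENGTH = 30  # assumed; the real value comes from the tokenizer


def expand_span(start, end, offset=1):
    # Shift by the number of special tokens inserted before the text (here 1 * <s>).
    start, end = start + offset, end + offset
    position_ids = list(range(start, end))[:MAX_MENTION_LENGTH]
    position_ids += [-1] * (MAX_MENTION_LENGTH - end + start)
    return position_ids


row = expand_span(3, 5)
assert row[:2] == [4, 5] and row[2:] == [-1] * 28 and len(row) == 30
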
1225
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.pad
1226
+ def pad(
1227
+ self,
1228
+ encoded_inputs: Union[
1229
+ BatchEncoding,
1230
+ List[BatchEncoding],
1231
+ Dict[str, EncodedInput],
1232
+ Dict[str, List[EncodedInput]],
1233
+ List[Dict[str, EncodedInput]],
1234
+ ],
1235
+ padding: Union[bool, str, PaddingStrategy] = True,
1236
+ max_length: Optional[int] = None,
1237
+ max_entity_length: Optional[int] = None,
1238
+ pad_to_multiple_of: Optional[int] = None,
1239
+ return_attention_mask: Optional[bool] = None,
1240
+ return_tensors: Optional[Union[str, TensorType]] = None,
1241
+ verbose: bool = True,
1242
+ ) -> BatchEncoding:
1243
+ """
1244
+ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
1245
+ in the batch. The padding side (left/right) and padding token ids are defined at the tokenizer level (with
1246
+ `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`). Note: if the `encoded_inputs` passed
1247
+ are dictionaries of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless
1248
+ you provide a different tensor type with `return_tensors`. In the case of PyTorch tensors, you will lose the
1249
+ specific device of your tensors, however.
1250
+
1251
+ Args:
1252
+ encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`):
1253
+ Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
1254
+ tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
1255
+ List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
1256
+ collate function. Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or
1257
+ TensorFlow tensors), see the note above for the return type.
1258
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
1259
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
1260
+ index) among:
1261
+
1262
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
1263
+ sequence is provided).
1264
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
1265
+ acceptable input length for the model if that argument is not provided.
1266
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
1267
+ lengths).
1268
+ max_length (`int`, *optional*):
1269
+ Maximum length of the returned list and optionally padding length (see above).
1270
+ max_entity_length (`int`, *optional*):
1271
+ The maximum length of the entity sequence.
1272
+ pad_to_multiple_of (`int`, *optional*):
1273
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
1274
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
1275
+ return_attention_mask (`bool`, *optional*):
1276
+ Whether to return the attention mask. If left to the default, will return the attention mask according
1277
+ to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
1278
+ masks?](../glossary#attention-mask)
1279
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
1280
+ If set, will return tensors instead of list of python integers. Acceptable values are:
1281
+
1282
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
1283
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
1284
+ - `'np'`: Return Numpy `np.ndarray` objects.
1285
+ verbose (`bool`, *optional*, defaults to `True`):
1286
+ Whether or not to print more information and warnings.
1287
+ """
1288
+ # If we have a list of dicts, let's convert it in a dict of lists
1289
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
1290
+ if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
1291
+ encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
1292
+
1293
+ # The model's main input name, usually `input_ids`, has to be passed for padding
1294
+ if self.model_input_names[0] not in encoded_inputs:
1295
+ raise ValueError(
1296
+ "You should supply an encoding or a list of encodings to this method "
1297
+ f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
1298
+ )
1299
+
1300
+ required_input = encoded_inputs[self.model_input_names[0]]
1301
+
1302
+ if not required_input:
1303
+ if return_attention_mask:
1304
+ encoded_inputs["attention_mask"] = []
1305
+ return encoded_inputs
1306
+
1307
+ # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
1308
+ # and rebuild them afterwards if no return_tensors is specified
1309
+ # Note that we lose the specific device the tensor may be on for PyTorch
1310
+
1311
+ first_element = required_input[0]
1312
+ if isinstance(first_element, (list, tuple)):
1313
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
1314
+ index = 0
1315
+ while len(required_input[index]) == 0:
1316
+ index += 1
1317
+ if index < len(required_input):
1318
+ first_element = required_input[index][0]
1319
+ # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
1320
+ if not isinstance(first_element, (int, list, tuple)):
1321
+ if is_tf_tensor(first_element):
1322
+ return_tensors = "tf" if return_tensors is None else return_tensors
1323
+ elif is_torch_tensor(first_element):
1324
+ return_tensors = "pt" if return_tensors is None else return_tensors
1325
+ elif isinstance(first_element, np.ndarray):
1326
+ return_tensors = "np" if return_tensors is None else return_tensors
1327
+ else:
1328
+ raise ValueError(
1329
+ f"type of {first_element} unknown: {type(first_element)}. "
1330
+ "Should be one of a python, numpy, pytorch or tensorflow object."
1331
+ )
1332
+
1333
+ for key, value in encoded_inputs.items():
1334
+ encoded_inputs[key] = to_py_obj(value)
1335
+
1336
+ # Convert padding_strategy in PaddingStrategy
1337
+ padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
1338
+ padding=padding, max_length=max_length, verbose=verbose
1339
+ )
1340
+
1341
+ if max_entity_length is None:
1342
+ max_entity_length = self.max_entity_length
1343
+
1344
+ required_input = encoded_inputs[self.model_input_names[0]]
1345
+ if required_input and not isinstance(required_input[0], (list, tuple)):
1346
+ encoded_inputs = self._pad(
1347
+ encoded_inputs,
1348
+ max_length=max_length,
1349
+ max_entity_length=max_entity_length,
1350
+ padding_strategy=padding_strategy,
1351
+ pad_to_multiple_of=pad_to_multiple_of,
1352
+ return_attention_mask=return_attention_mask,
1353
+ )
1354
+ return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
1355
+
1356
+ batch_size = len(required_input)
1357
+ if any(len(v) != batch_size for v in encoded_inputs.values()):
1358
+ raise ValueError("Some items in the output dictionary have a different batch size than others.")
1359
+
1360
+ if padding_strategy == PaddingStrategy.LONGEST:
1361
+ max_length = max(len(inputs) for inputs in required_input)
1362
+ max_entity_length = (
1363
+ max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0
1364
+ )
1365
+ padding_strategy = PaddingStrategy.MAX_LENGTH
1366
+
1367
+ batch_outputs = {}
1368
+ for i in range(batch_size):
1369
+ inputs = {k: v[i] for k, v in encoded_inputs.items()}
1370
+ outputs = self._pad(
1371
+ inputs,
1372
+ max_length=max_length,
1373
+ max_entity_length=max_entity_length,
1374
+ padding_strategy=padding_strategy,
1375
+ pad_to_multiple_of=pad_to_multiple_of,
1376
+ return_attention_mask=return_attention_mask,
1377
+ )
1378
+
1379
+ for key, value in outputs.items():
1380
+ if key not in batch_outputs:
1381
+ batch_outputs[key] = []
1382
+ batch_outputs[key].append(value)
1383
+
1384
+ return BatchEncoding(batch_outputs, tensor_type=return_tensors)
1385
+
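
Because `pad` accepts a list of feature dicts and converts it into a dict of lists, it can be used directly as a DataLoader collate function, as the docstring notes. A hedged sketch assuming PyTorch is installed and reusing `tokenizer`, `texts` and `spans` from the batching example above:

from torch.utils.data import DataLoader

# Unpadded, untensorized features (plain python lists inside each BatchEncoding).
features = [tokenizer(t, entity_spans=s) for t, s in zip(texts, spans)]

loader = DataLoader(
    features,
    batch_size=2,
    # pad() pads both the word and the entity sequences, then tensorizes the batch.
    collate_fn=lambda batch: tokenizer.pad(batch, padding="longest", return_tensors="pt"),
)
batch = next(iter(loader))
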
1386
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._pad
1387
+ def _pad(
1388
+ self,
1389
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1390
+ max_length: Optional[int] = None,
1391
+ max_entity_length: Optional[int] = None,
1392
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1393
+ pad_to_multiple_of: Optional[int] = None,
1394
+ return_attention_mask: Optional[bool] = None,
1395
+ ) -> dict:
1396
+ """
1397
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1398
+
1399
+
1400
+ Args:
1401
+ encoded_inputs:
1402
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1403
+ max_length: maximum length of the returned list and optionally padding length (see below).
1404
+ Will truncate by taking into account the special tokens.
1405
+ max_entity_length: The maximum length of the entity sequence.
1406
+ padding_strategy: PaddingStrategy to use for padding.
1407
+
1408
+
1409
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
1410
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1411
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1412
+ The tokenizer padding sides are defined in self.padding_side:
1413
+
1414
+
1415
+ - 'left': pads on the left of the sequences
1416
+ - 'right': pads on the right of the sequences
1417
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1418
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1419
+ `>= 7.5` (Volta).
1420
+ return_attention_mask:
1421
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1422
+ """
1423
+ entities_provided = bool("entity_ids" in encoded_inputs)
1424
+
1425
+ # Load from model defaults
1426
+ if return_attention_mask is None:
1427
+ return_attention_mask = "attention_mask" in self.model_input_names
1428
+
1429
+ if padding_strategy == PaddingStrategy.LONGEST:
1430
+ max_length = len(encoded_inputs["input_ids"])
1431
+ if entities_provided:
1432
+ max_entity_length = len(encoded_inputs["entity_ids"])
1433
+
1434
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1435
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1436
+
1437
+ if (
1438
+ entities_provided
1439
+ and max_entity_length is not None
1440
+ and pad_to_multiple_of is not None
1441
+ and (max_entity_length % pad_to_multiple_of != 0)
1442
+ ):
1443
+ max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1444
+
1445
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
1446
+ len(encoded_inputs["input_ids"]) != max_length
1447
+ or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
1448
+ )
1449
+
1450
+ # Initialize attention mask if not present.
1451
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1452
+ encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
1453
+ if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs:
1454
+ encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
1455
+
1456
+ if needs_to_be_padded:
1457
+ difference = max_length - len(encoded_inputs["input_ids"])
1458
+ if entities_provided:
1459
+ entity_difference = max_entity_length - len(encoded_inputs["entity_ids"])
1460
+ if self.padding_side == "right":
1461
+ if return_attention_mask:
1462
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1463
+ if entities_provided:
1464
+ encoded_inputs["entity_attention_mask"] = (
1465
+ encoded_inputs["entity_attention_mask"] + [0] * entity_difference
1466
+ )
1467
+ if "token_type_ids" in encoded_inputs:
1468
+ encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference
1469
+ if entities_provided:
1470
+ encoded_inputs["entity_token_type_ids"] = (
1471
+ encoded_inputs["entity_token_type_ids"] + [0] * entity_difference
1472
+ )
1473
+ if "special_tokens_mask" in encoded_inputs:
1474
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1475
+ encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
1476
+ if entities_provided:
1477
+ encoded_inputs["entity_ids"] = (
1478
+ encoded_inputs["entity_ids"] + [self.entity_pad_token_id] * entity_difference
1479
+ )
1480
+ encoded_inputs["entity_position_ids"] = (
1481
+ encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference
1482
+ )
1483
+ if self.task == "entity_span_classification":
1484
+ encoded_inputs["entity_start_positions"] = (
1485
+ encoded_inputs["entity_start_positions"] + [0] * entity_difference
1486
+ )
1487
+ encoded_inputs["entity_end_positions"] = (
1488
+ encoded_inputs["entity_end_positions"] + [0] * entity_difference
1489
+ )
1490
+
1491
+ elif self.padding_side == "left":
1492
+ if return_attention_mask:
1493
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1494
+ if entities_provided:
1495
+ encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[
1496
+ "entity_attention_mask"
1497
+ ]
1498
+ if "token_type_ids" in encoded_inputs:
1499
+ encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"]
1500
+ if entities_provided:
1501
+ encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[
1502
+ "entity_token_type_ids"
1503
+ ]
1504
+ if "special_tokens_mask" in encoded_inputs:
1505
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1506
+ encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
1507
+ if entities_provided:
1508
+ encoded_inputs["entity_ids"] = [self.entity_pad_token_id] * entity_difference + encoded_inputs[
1509
+ "entity_ids"
1510
+ ]
1511
+ encoded_inputs["entity_position_ids"] = [
1512
+ [-1] * self.max_mention_length
1513
+ ] * entity_difference + encoded_inputs["entity_position_ids"]
1514
+ if self.task == "entity_span_classification":
1515
+ encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[
1516
+ "entity_start_positions"
1517
+ ]
1518
+ encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[
1519
+ "entity_end_positions"
1520
+ ]
1521
+ else:
1522
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1523
+
1524
+ return encoded_inputs
1525
+
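
The `pad_to_multiple_of` handling above rounds the target length up to the next multiple (useful for Tensor-Core-friendly shapes); the arithmetic is plain ceiling rounding:

def round_up(length, multiple):
    # Same formula as in _pad: only round up when not already aligned.
    if multiple is not None and length % multiple != 0:
        length = ((length // multiple) + 1) * multiple
    return length


assert round_up(37, 8) == 40
assert round_up(40, 8) == 40
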
1526
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str]:
1527
+ if not os.path.isdir(save_directory):
1528
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
1529
+ return
1530
+
1531
+ out_vocab_file = os.path.join(
1532
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
1533
+ )
1534
+
1535
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
1536
+ copyfile(self.vocab_file, out_vocab_file)
1537
+ elif not os.path.isfile(self.vocab_file):
1538
+ with open(out_vocab_file, "wb") as fi:
1539
+ content_spiece_model = self.sp_model.serialized_model_proto()
1540
+ fi.write(content_spiece_model)
1541
+
1542
+ entity_vocab_file = os.path.join(
1543
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["entity_vocab_file"]
1544
+ )
1545
+
1546
+ with open(entity_vocab_file, "w", encoding="utf-8") as f:
1547
+ f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
1548
+
1549
+ return out_vocab_file, entity_vocab_file
1550
+
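
`save_vocabulary` writes two artifacts: the SentencePiece model (copied, or re-serialized from the in-memory proto) and the entity vocabulary as JSON. It is normally invoked indirectly through `save_pretrained`; a hedged round-trip sketch, reusing the tokenizer from the earlier examples and an arbitrary directory name:

import os

save_dir = "./mluke-tokenizer-copy"  # arbitrary output directory
tokenizer.save_pretrained(save_dir)  # calls save_vocabulary under the hood
print(sorted(os.listdir(save_dir)))
# Expect the SentencePiece model file and the entity vocabulary JSON among the
# outputs, alongside the usual tokenizer_config.json and special_tokens_map.json.

reloaded = type(tokenizer).from_pretrained(save_dir)
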
1551
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.build_inputs_with_special_tokens
1552
+ def build_inputs_with_special_tokens(
1553
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
1554
+ ) -> List[int]:
1555
+ """
1556
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
1557
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
1558
+
1559
+ - single sequence: `<s> X </s>`
1560
+ - pair of sequences: `<s> A </s></s> B </s>`
1561
+
1562
+ Args:
1563
+ token_ids_0 (`List[int]`):
1564
+ List of IDs to which the special tokens will be added.
1565
+ token_ids_1 (`List[int]`, *optional*):
1566
+ Optional second list of IDs for sequence pairs.
1567
+
1568
+ Returns:
1569
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
1570
+ """
1571
+
1572
+ if token_ids_1 is None:
1573
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
1574
+ cls = [self.cls_token_id]
1575
+ sep = [self.sep_token_id]
1576
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
1577
+
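
A dependency-free illustration of the `<s> X </s>` and `<s> A </s></s> B </s>` layouts built above, using made-up token ids (0 for `<s>` and 2 for `</s>` are assumptions chosen for readability):

cls_id, sep_id = 0, 2
ids_a, ids_b = [10, 11], [20, 21, 22]

single = [cls_id] + ids_a + [sep_id]
pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 21, 22, 2]
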
1578
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_special_tokens_mask
1579
+ def get_special_tokens_mask(
1580
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
1581
+ ) -> List[int]:
1582
+ """
1583
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
1584
+ special tokens using the tokenizer `prepare_for_model` method.
1585
+
1586
+ Args:
1587
+ token_ids_0 (`List[int]`):
1588
+ List of IDs.
1589
+ token_ids_1 (`List[int]`, *optional*):
1590
+ Optional second list of IDs for sequence pairs.
1591
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
1592
+ Whether or not the token list is already formatted with special tokens for the model.
1593
+
1594
+ Returns:
1595
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
1596
+ """
1597
+
1598
+ if already_has_special_tokens:
1599
+ return super().get_special_tokens_mask(
1600
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
1601
+ )
1602
+
1603
+ if token_ids_1 is None:
1604
+ return [1] + ([0] * len(token_ids_0)) + [1]
1605
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
1606
+
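
Continuing the toy ids from the previous sketch, the special-tokens mask for a pair simply marks the one leading `<s>` and the three `</s>` positions:

mask_single = [1] + [0] * len(ids_a) + [1]
mask_pair = [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]

assert mask_single == [1, 0, 0, 1]
assert mask_pair == [1, 0, 0, 1, 1, 0, 0, 0, 1]
assert len(mask_pair) == len(pair)
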
1607
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.create_token_type_ids_from_sequences
1608
+ def create_token_type_ids_from_sequences(
1609
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
1610
+ ) -> List[int]:
1611
+ """
1612
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
1613
+ not make use of token type ids, therefore a list of zeros is returned.
1614
+
1615
+ Args:
1616
+ token_ids_0 (`List[int]`):
1617
+ List of IDs.
1618
+ token_ids_1 (`List[int]`, *optional*):
1619
+ Optional second list of IDs for sequence pairs.
1620
+
1621
+ Returns:
1622
+ `List[int]`: List of zeros.
1623
+
1624
+ """
1625
+
1626
+ sep = [self.sep_token_id]
1627
+ cls = [self.cls_token_id]
1628
+
1629
+ if token_ids_1 is None:
1630
+ return len(cls + token_ids_0 + sep) * [0]
1631
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py ADDED
@@ -0,0 +1,130 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+ )
+
+
+ _import_structure = {
+ "configuration_mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig"],
+ "tokenization_mpnet": ["MPNetTokenizer"],
+ }
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["tokenization_mpnet_fast"] = ["MPNetTokenizerFast"]
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_mpnet"] = [
+ "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MPNetForMaskedLM",
+ "MPNetForMultipleChoice",
+ "MPNetForQuestionAnswering",
+ "MPNetForSequenceClassification",
+ "MPNetForTokenClassification",
+ "MPNetLayer",
+ "MPNetModel",
+ "MPNetPreTrainedModel",
+ ]
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ _import_structure["modeling_tf_mpnet"] = [
+ "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFMPNetEmbeddings",
+ "TFMPNetForMaskedLM",
+ "TFMPNetForMultipleChoice",
+ "TFMPNetForQuestionAnswering",
+ "TFMPNetForSequenceClassification",
+ "TFMPNetForTokenClassification",
+ "TFMPNetMainLayer",
+ "TFMPNetModel",
+ "TFMPNetPreTrainedModel",
+ ]
+
+
+ if TYPE_CHECKING:
+ from .configuration_mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig
+ from .tokenization_mpnet import MPNetTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mpnet_fast import MPNetTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mpnet import (
+ MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MPNetForMaskedLM,
+ MPNetForMultipleChoice,
+ MPNetForQuestionAnswering,
+ MPNetForSequenceClassification,
+ MPNetForTokenClassification,
+ MPNetLayer,
+ MPNetModel,
+ MPNetPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mpnet import (
+ TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFMPNetEmbeddings,
+ TFMPNetForMaskedLM,
+ TFMPNetForMultipleChoice,
+ TFMPNetForQuestionAnswering,
+ TFMPNetForSequenceClassification,
+ TFMPNetForTokenClassification,
+ TFMPNetMainLayer,
+ TFMPNetModel,
+ TFMPNetPreTrainedModel,
+ )
+
+ else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
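Illustrative note (not part of the commit): because the module registers itself through `_LazyModule`, the symbols listed in `_import_structure` are only resolved on first attribute access, so the configuration and slow tokenizer can typically be imported without loading torch or TensorFlow. A minimal usage sketch:

    from transformers import MPNetConfig, MPNetTokenizer  # both importable without a deep-learning framework

    config = MPNetConfig()
    print(config.model_type)  # "mpnet"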
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.91 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc ADDED
Binary file (4.7 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc ADDED
Binary file (30.1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc ADDED
Binary file (39.9 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc ADDED
Binary file (18.1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc ADDED
Binary file (8.01 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ MPNet model configuration"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+ MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
25
+ "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/config.json",
26
+ }
27
+
28
+
29
+ class MPNetConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. It is used to
32
+ instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the MPNet
34
+ [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 30527):
41
+ Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`].
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 12):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 12):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 3072):
50
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
51
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
54
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout ratio for the attention probabilities.
58
+ max_position_embeddings (`int`, *optional*, defaults to 512):
59
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
60
+ just in case (e.g., 512 or 1024 or 2048).
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
64
+ The epsilon used by the layer normalization layers.
65
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
66
+ The number of buckets to use for each attention layer.
67
+
68
+ Examples:
69
+
70
+ ```python
71
+ >>> from transformers import MPNetModel, MPNetConfig
72
+
73
+ >>> # Initializing a MPNet mpnet-base style configuration
74
+ >>> configuration = MPNetConfig()
75
+
76
+ >>> # Initializing a model from the mpnet-base style configuration
77
+ >>> model = MPNetModel(configuration)
78
+
79
+ >>> # Accessing the model configuration
80
+ >>> configuration = model.config
81
+ ```"""
82
+
83
+ model_type = "mpnet"
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_size=30527,
88
+ hidden_size=768,
89
+ num_hidden_layers=12,
90
+ num_attention_heads=12,
91
+ intermediate_size=3072,
92
+ hidden_act="gelu",
93
+ hidden_dropout_prob=0.1,
94
+ attention_probs_dropout_prob=0.1,
95
+ max_position_embeddings=512,
96
+ initializer_range=0.02,
97
+ layer_norm_eps=1e-12,
98
+ relative_attention_num_buckets=32,
99
+ pad_token_id=1,
100
+ bos_token_id=0,
101
+ eos_token_id=2,
102
+ **kwargs,
103
+ ):
104
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
105
+
106
+ self.vocab_size = vocab_size
107
+ self.hidden_size = hidden_size
108
+ self.num_hidden_layers = num_hidden_layers
109
+ self.num_attention_heads = num_attention_heads
110
+ self.hidden_act = hidden_act
111
+ self.intermediate_size = intermediate_size
112
+ self.hidden_dropout_prob = hidden_dropout_prob
113
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
114
+ self.max_position_embeddings = max_position_embeddings
115
+ self.initializer_range = initializer_range
116
+ self.layer_norm_eps = layer_norm_eps
117
+ self.relative_attention_num_buckets = relative_attention_num_buckets
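Illustrative note (not part of the commit): beyond the docstring example above, any keyword argument of `__init__` can be overridden to describe a custom architecture; arguments left out keep the defaults listed in the docstring.

    from transformers import MPNetConfig

    small_config = MPNetConfig(num_hidden_layers=6, relative_attention_num_buckets=64)
    print(small_config.hidden_size)                     # 768, unchanged default
    print(small_config.relative_attention_num_buckets)  # 64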
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/modeling_mpnet.py ADDED
@@ -0,0 +1,1055 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch MPNet model."""
17
+
18
+
19
+ import math
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN, gelu
27
+ from ...modeling_outputs import (
28
+ BaseModelOutput,
29
+ BaseModelOutputWithPooling,
30
+ MaskedLMOutput,
31
+ MultipleChoiceModelOutput,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutput,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
38
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
39
+ from .configuration_mpnet import MPNetConfig
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+ _CHECKPOINT_FOR_DOC = "microsoft/mpnet-base"
45
+ _CONFIG_FOR_DOC = "MPNetConfig"
46
+
47
+
48
+ MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
49
+ "microsoft/mpnet-base",
50
+ ]
51
+
52
+
53
+ class MPNetPreTrainedModel(PreTrainedModel):
54
+ config_class = MPNetConfig
55
+ pretrained_model_archive_map = MPNET_PRETRAINED_MODEL_ARCHIVE_LIST
56
+ base_model_prefix = "mpnet"
57
+
58
+ def _init_weights(self, module):
59
+ """Initialize the weights"""
60
+ if isinstance(module, nn.Linear):
61
+ # Slightly different from the TF version which uses truncated_normal for initialization
62
+ # cf https://github.com/pytorch/pytorch/pull/5617
63
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
64
+ if module.bias is not None:
65
+ module.bias.data.zero_()
66
+ elif isinstance(module, nn.Embedding):
67
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
68
+ if module.padding_idx is not None:
69
+ module.weight.data[module.padding_idx].zero_()
70
+ elif isinstance(module, nn.LayerNorm):
71
+ module.bias.data.zero_()
72
+ module.weight.data.fill_(1.0)
73
+
74
+
75
+ class MPNetEmbeddings(nn.Module):
76
+ def __init__(self, config):
77
+ super().__init__()
78
+ self.padding_idx = 1
79
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
80
+ self.position_embeddings = nn.Embedding(
81
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
82
+ )
83
+
84
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
85
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
86
+ self.register_buffer(
87
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
88
+ )
89
+
90
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs):
91
+ if position_ids is None:
92
+ if input_ids is not None:
93
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
94
+ else:
95
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
96
+
97
+ if input_ids is not None:
98
+ input_shape = input_ids.size()
99
+ else:
100
+ input_shape = inputs_embeds.size()[:-1]
101
+
102
+ seq_length = input_shape[1]
103
+
104
+ if position_ids is None:
105
+ position_ids = self.position_ids[:, :seq_length]
106
+
107
+ if inputs_embeds is None:
108
+ inputs_embeds = self.word_embeddings(input_ids)
109
+ position_embeddings = self.position_embeddings(position_ids)
110
+
111
+ embeddings = inputs_embeds + position_embeddings
112
+ embeddings = self.LayerNorm(embeddings)
113
+ embeddings = self.dropout(embeddings)
114
+ return embeddings
115
+
116
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
117
+ """
118
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
119
+
120
+ Args:
121
+ inputs_embeds: torch.Tensor
122
+
123
+ Returns: torch.Tensor
124
+ """
125
+ input_shape = inputs_embeds.size()[:-1]
126
+ sequence_length = input_shape[1]
127
+
128
+ position_ids = torch.arange(
129
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
130
+ )
131
+ return position_ids.unsqueeze(0).expand(input_shape)
132
+
133
+
134
+ class MPNetSelfAttention(nn.Module):
135
+ def __init__(self, config):
136
+ super().__init__()
137
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
138
+ raise ValueError(
139
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
140
+ f"heads ({config.num_attention_heads})"
141
+ )
142
+
143
+ self.num_attention_heads = config.num_attention_heads
144
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
145
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
146
+
147
+ self.q = nn.Linear(config.hidden_size, self.all_head_size)
148
+ self.k = nn.Linear(config.hidden_size, self.all_head_size)
149
+ self.v = nn.Linear(config.hidden_size, self.all_head_size)
150
+ self.o = nn.Linear(config.hidden_size, config.hidden_size)
151
+
152
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
153
+
154
+ def transpose_for_scores(self, x):
155
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
156
+ x = x.view(*new_x_shape)
157
+ return x.permute(0, 2, 1, 3)
158
+
159
+ def forward(
160
+ self,
161
+ hidden_states,
162
+ attention_mask=None,
163
+ head_mask=None,
164
+ position_bias=None,
165
+ output_attentions=False,
166
+ **kwargs,
167
+ ):
168
+ q = self.q(hidden_states)
169
+ k = self.k(hidden_states)
170
+ v = self.v(hidden_states)
171
+
172
+ q = self.transpose_for_scores(q)
173
+ k = self.transpose_for_scores(k)
174
+ v = self.transpose_for_scores(v)
175
+
176
+ # Take the dot product between "query" and "key" to get the raw attention scores.
177
+ attention_scores = torch.matmul(q, k.transpose(-1, -2))
178
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
179
+
180
+ # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
181
+ if position_bias is not None:
182
+ attention_scores += position_bias
183
+
184
+ if attention_mask is not None:
185
+ attention_scores = attention_scores + attention_mask
186
+
187
+ # Normalize the attention scores to probabilities.
188
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
189
+
190
+ attention_probs = self.dropout(attention_probs)
191
+
192
+ if head_mask is not None:
193
+ attention_probs = attention_probs * head_mask
194
+
195
+ c = torch.matmul(attention_probs, v)
196
+
197
+ c = c.permute(0, 2, 1, 3).contiguous()
198
+ new_c_shape = c.size()[:-2] + (self.all_head_size,)
199
+ c = c.view(*new_c_shape)
200
+
201
+ o = self.o(c)
202
+
203
+ outputs = (o, attention_probs) if output_attentions else (o,)
204
+ return outputs
205
+
206
+
207
+ class MPNetAttention(nn.Module):
208
+ def __init__(self, config):
209
+ super().__init__()
210
+ self.attn = MPNetSelfAttention(config)
211
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
212
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
213
+
214
+ self.pruned_heads = set()
215
+
216
+ def prune_heads(self, heads):
217
+ if len(heads) == 0:
218
+ return
219
+ heads, index = find_pruneable_heads_and_indices(
220
+ heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads
221
+ )
222
+
223
+ self.attn.q = prune_linear_layer(self.attn.q, index)
224
+ self.attn.k = prune_linear_layer(self.attn.k, index)
225
+ self.attn.v = prune_linear_layer(self.attn.v, index)
226
+ self.attn.o = prune_linear_layer(self.attn.o, index, dim=1)
227
+
228
+ self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads)
229
+ self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads
230
+ self.pruned_heads = self.pruned_heads.union(heads)
231
+
232
+ def forward(
233
+ self,
234
+ hidden_states,
235
+ attention_mask=None,
236
+ head_mask=None,
237
+ position_bias=None,
238
+ output_attentions=False,
239
+ **kwargs,
240
+ ):
241
+ self_outputs = self.attn(
242
+ hidden_states,
243
+ attention_mask,
244
+ head_mask,
245
+ position_bias,
246
+ output_attentions=output_attentions,
247
+ )
248
+ attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states)
249
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
250
+ return outputs
251
+
252
+
253
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
254
+ class MPNetIntermediate(nn.Module):
255
+ def __init__(self, config):
256
+ super().__init__()
257
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
258
+ if isinstance(config.hidden_act, str):
259
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
260
+ else:
261
+ self.intermediate_act_fn = config.hidden_act
262
+
263
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
264
+ hidden_states = self.dense(hidden_states)
265
+ hidden_states = self.intermediate_act_fn(hidden_states)
266
+ return hidden_states
267
+
268
+
269
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
270
+ class MPNetOutput(nn.Module):
271
+ def __init__(self, config):
272
+ super().__init__()
273
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
274
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
275
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
276
+
277
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
278
+ hidden_states = self.dense(hidden_states)
279
+ hidden_states = self.dropout(hidden_states)
280
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
281
+ return hidden_states
282
+
283
+
284
+ class MPNetLayer(nn.Module):
285
+ def __init__(self, config):
286
+ super().__init__()
287
+ self.attention = MPNetAttention(config)
288
+ self.intermediate = MPNetIntermediate(config)
289
+ self.output = MPNetOutput(config)
290
+
291
+ def forward(
292
+ self,
293
+ hidden_states,
294
+ attention_mask=None,
295
+ head_mask=None,
296
+ position_bias=None,
297
+ output_attentions=False,
298
+ **kwargs,
299
+ ):
300
+ self_attention_outputs = self.attention(
301
+ hidden_states,
302
+ attention_mask,
303
+ head_mask,
304
+ position_bias=position_bias,
305
+ output_attentions=output_attentions,
306
+ )
307
+ attention_output = self_attention_outputs[0]
308
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
309
+
310
+ intermediate_output = self.intermediate(attention_output)
311
+ layer_output = self.output(intermediate_output, attention_output)
312
+ outputs = (layer_output,) + outputs
313
+ return outputs
314
+
315
+
316
+ class MPNetEncoder(nn.Module):
317
+ def __init__(self, config):
318
+ super().__init__()
319
+ self.config = config
320
+ self.n_heads = config.num_attention_heads
321
+ self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)])
322
+ self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads)
323
+
324
+ def forward(
325
+ self,
326
+ hidden_states: torch.Tensor,
327
+ attention_mask: Optional[torch.Tensor] = None,
328
+ head_mask: Optional[torch.Tensor] = None,
329
+ output_attentions: bool = False,
330
+ output_hidden_states: bool = False,
331
+ return_dict: bool = False,
332
+ **kwargs,
333
+ ):
334
+ position_bias = self.compute_position_bias(hidden_states)
335
+ all_hidden_states = () if output_hidden_states else None
336
+ all_attentions = () if output_attentions else None
337
+ for i, layer_module in enumerate(self.layer):
338
+ if output_hidden_states:
339
+ all_hidden_states = all_hidden_states + (hidden_states,)
340
+
341
+ layer_outputs = layer_module(
342
+ hidden_states,
343
+ attention_mask,
344
+ head_mask[i],
345
+ position_bias,
346
+ output_attentions=output_attentions,
347
+ **kwargs,
348
+ )
349
+ hidden_states = layer_outputs[0]
350
+
351
+ if output_attentions:
352
+ all_attentions = all_attentions + (layer_outputs[1],)
353
+
354
+ # Add last layer
355
+ if output_hidden_states:
356
+ all_hidden_states = all_hidden_states + (hidden_states,)
357
+
358
+ if not return_dict:
359
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
360
+ return BaseModelOutput(
361
+ last_hidden_state=hidden_states,
362
+ hidden_states=all_hidden_states,
363
+ attentions=all_attentions,
364
+ )
365
+
366
+ def compute_position_bias(self, x, position_ids=None, num_buckets=32):
367
+ bsz, qlen, klen = x.size(0), x.size(1), x.size(1)
368
+ if position_ids is not None:
369
+ context_position = position_ids[:, :, None]
370
+ memory_position = position_ids[:, None, :]
371
+ else:
372
+ context_position = torch.arange(qlen, dtype=torch.long)[:, None]
373
+ memory_position = torch.arange(klen, dtype=torch.long)[None, :]
374
+
375
+ relative_position = memory_position - context_position
376
+
377
+ rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets)
378
+ rp_bucket = rp_bucket.to(x.device)
379
+ values = self.relative_attention_bias(rp_bucket)
380
+ values = values.permute([2, 0, 1]).unsqueeze(0)
381
+ values = values.expand((bsz, -1, qlen, klen)).contiguous()
382
+ return values
383
+
384
+ @staticmethod
385
+ def relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
386
+ ret = 0
387
+ n = -relative_position
388
+
389
+ num_buckets //= 2
390
+ ret += (n < 0).to(torch.long) * num_buckets
391
+ n = torch.abs(n)
392
+
393
+ max_exact = num_buckets // 2
394
+ is_small = n < max_exact
395
+
396
+ val_if_large = max_exact + (
397
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
398
+ ).to(torch.long)
399
+
400
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
401
+ ret += torch.where(is_small, n, val_if_large)
402
+ return ret
403
+
404
+
405
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
406
+ class MPNetPooler(nn.Module):
407
+ def __init__(self, config):
408
+ super().__init__()
409
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
410
+ self.activation = nn.Tanh()
411
+
412
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
413
+ # We "pool" the model by simply taking the hidden state corresponding
414
+ # to the first token.
415
+ first_token_tensor = hidden_states[:, 0]
416
+ pooled_output = self.dense(first_token_tensor)
417
+ pooled_output = self.activation(pooled_output)
418
+ return pooled_output
419
+
420
+
421
+ MPNET_START_DOCSTRING = r"""
422
+
423
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
424
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
425
+ etc.)
426
+
427
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
428
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
429
+ and behavior.
430
+
431
+ Parameters:
432
+ config ([`MPNetConfig`]): Model configuration class with all the parameters of the model.
433
+ Initializing with a config file does not load the weights associated with the model, only the
434
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
435
+ """
436
+
437
+ MPNET_INPUTS_DOCSTRING = r"""
438
+ Args:
439
+ input_ids (`torch.LongTensor` of shape `({0})`):
440
+ Indices of input sequence tokens in the vocabulary.
441
+
442
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
443
+ [`PreTrainedTokenizer.__call__`] for details.
444
+
445
+ [What are input IDs?](../glossary#input-ids)
446
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
447
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
448
+
449
+ - 1 for tokens that are **not masked**,
450
+ - 0 for tokens that are **masked**.
451
+
452
+ [What are attention masks?](../glossary#attention-mask)
453
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
454
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
455
+ config.max_position_embeddings - 1]`.
456
+
457
+ [What are position IDs?](../glossary#position-ids)
458
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
459
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
460
+
461
+ - 1 indicates the head is **not masked**,
462
+ - 0 indicates the head is **masked**.
463
+
464
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
465
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
466
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
467
+ model's internal embedding lookup matrix.
468
+ output_attentions (`bool`, *optional*):
469
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
470
+ tensors for more detail.
471
+ output_hidden_states (`bool`, *optional*):
472
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
473
+ more detail.
474
+ return_dict (`bool`, *optional*):
475
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
476
+ """
477
+
478
+
479
+ @add_start_docstrings(
480
+ "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.",
481
+ MPNET_START_DOCSTRING,
482
+ )
483
+ class MPNetModel(MPNetPreTrainedModel):
484
+ def __init__(self, config, add_pooling_layer=True):
485
+ super().__init__(config)
486
+ self.config = config
487
+
488
+ self.embeddings = MPNetEmbeddings(config)
489
+ self.encoder = MPNetEncoder(config)
490
+ self.pooler = MPNetPooler(config) if add_pooling_layer else None
491
+
492
+ # Initialize weights and apply final processing
493
+ self.post_init()
494
+
495
+ def get_input_embeddings(self):
496
+ return self.embeddings.word_embeddings
497
+
498
+ def set_input_embeddings(self, value):
499
+ self.embeddings.word_embeddings = value
500
+
501
+ def _prune_heads(self, heads_to_prune):
502
+ """
503
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
504
+ class PreTrainedModel
505
+ """
506
+ for layer, heads in heads_to_prune.items():
507
+ self.encoder.layer[layer].attention.prune_heads(heads)
508
+
509
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
510
+ @add_code_sample_docstrings(
511
+ checkpoint=_CHECKPOINT_FOR_DOC,
512
+ output_type=BaseModelOutputWithPooling,
513
+ config_class=_CONFIG_FOR_DOC,
514
+ )
515
+ def forward(
516
+ self,
517
+ input_ids: Optional[torch.LongTensor] = None,
518
+ attention_mask: Optional[torch.FloatTensor] = None,
519
+ position_ids: Optional[torch.LongTensor] = None,
520
+ head_mask: Optional[torch.FloatTensor] = None,
521
+ inputs_embeds: Optional[torch.FloatTensor] = None,
522
+ output_attentions: Optional[bool] = None,
523
+ output_hidden_states: Optional[bool] = None,
524
+ return_dict: Optional[bool] = None,
525
+ **kwargs,
526
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
527
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
528
+ output_hidden_states = (
529
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
530
+ )
531
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
532
+
533
+ if input_ids is not None and inputs_embeds is not None:
534
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
535
+ elif input_ids is not None:
536
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
537
+ input_shape = input_ids.size()
538
+ elif inputs_embeds is not None:
539
+ input_shape = inputs_embeds.size()[:-1]
540
+ else:
541
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
542
+
543
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
544
+
545
+ if attention_mask is None:
546
+ attention_mask = torch.ones(input_shape, device=device)
547
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
548
+
549
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
550
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds)
551
+ encoder_outputs = self.encoder(
552
+ embedding_output,
553
+ attention_mask=extended_attention_mask,
554
+ head_mask=head_mask,
555
+ output_attentions=output_attentions,
556
+ output_hidden_states=output_hidden_states,
557
+ return_dict=return_dict,
558
+ )
559
+ sequence_output = encoder_outputs[0]
560
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
561
+
562
+ if not return_dict:
563
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
564
+
565
+ return BaseModelOutputWithPooling(
566
+ last_hidden_state=sequence_output,
567
+ pooler_output=pooled_output,
568
+ hidden_states=encoder_outputs.hidden_states,
569
+ attentions=encoder_outputs.attentions,
570
+ )
571
+
572
+
573
+ class MPNetForMaskedLM(MPNetPreTrainedModel):
574
+ _tied_weights_keys = ["lm_head.decoder"]
575
+
576
+ def __init__(self, config):
577
+ super().__init__(config)
578
+
579
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
580
+ self.lm_head = MPNetLMHead(config)
581
+
582
+ # Initialize weights and apply final processing
583
+ self.post_init()
584
+
585
+ def get_output_embeddings(self):
586
+ return self.lm_head.decoder
587
+
588
+ def set_output_embeddings(self, new_embeddings):
589
+ self.lm_head.decoder = new_embeddings
590
+
591
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
592
+ @add_code_sample_docstrings(
593
+ checkpoint=_CHECKPOINT_FOR_DOC,
594
+ output_type=MaskedLMOutput,
595
+ config_class=_CONFIG_FOR_DOC,
596
+ )
597
+ def forward(
598
+ self,
599
+ input_ids: Optional[torch.LongTensor] = None,
600
+ attention_mask: Optional[torch.FloatTensor] = None,
601
+ position_ids: Optional[torch.LongTensor] = None,
602
+ head_mask: Optional[torch.FloatTensor] = None,
603
+ inputs_embeds: Optional[torch.FloatTensor] = None,
604
+ labels: Optional[torch.LongTensor] = None,
605
+ output_attentions: Optional[bool] = None,
606
+ output_hidden_states: Optional[bool] = None,
607
+ return_dict: Optional[bool] = None,
608
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
609
+ r"""
610
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
611
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
612
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
613
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
614
+ """
615
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
616
+
617
+ outputs = self.mpnet(
618
+ input_ids,
619
+ attention_mask=attention_mask,
620
+ position_ids=position_ids,
621
+ head_mask=head_mask,
622
+ inputs_embeds=inputs_embeds,
623
+ output_attentions=output_attentions,
624
+ output_hidden_states=output_hidden_states,
625
+ return_dict=return_dict,
626
+ )
627
+
628
+ sequence_output = outputs[0]
629
+ prediction_scores = self.lm_head(sequence_output)
630
+
631
+ masked_lm_loss = None
632
+ if labels is not None:
633
+ loss_fct = CrossEntropyLoss()
634
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
635
+
636
+ if not return_dict:
637
+ output = (prediction_scores,) + outputs[2:]
638
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
639
+
640
+ return MaskedLMOutput(
641
+ loss=masked_lm_loss,
642
+ logits=prediction_scores,
643
+ hidden_states=outputs.hidden_states,
644
+ attentions=outputs.attentions,
645
+ )
646
+
647
+
648
+ class MPNetLMHead(nn.Module):
649
+ """MPNet Head for masked and permuted language modeling."""
650
+
651
+ def __init__(self, config):
652
+ super().__init__()
653
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
654
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
655
+
656
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
657
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
658
+
659
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
660
+ self.decoder.bias = self.bias
661
+
662
+ def forward(self, features, **kwargs):
663
+ x = self.dense(features)
664
+ x = gelu(x)
665
+ x = self.layer_norm(x)
666
+
667
+ # project back to size of vocabulary with bias
668
+ x = self.decoder(x)
669
+
670
+ return x
671
+
672
+
673
+ @add_start_docstrings(
674
+ """
675
+ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
676
+ output) e.g. for GLUE tasks.
677
+ """,
678
+ MPNET_START_DOCSTRING,
679
+ )
680
+ class MPNetForSequenceClassification(MPNetPreTrainedModel):
681
+ def __init__(self, config):
682
+ super().__init__(config)
683
+
684
+ self.num_labels = config.num_labels
685
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
686
+ self.classifier = MPNetClassificationHead(config)
687
+
688
+ # Initialize weights and apply final processing
689
+ self.post_init()
690
+
691
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
692
+ @add_code_sample_docstrings(
693
+ checkpoint=_CHECKPOINT_FOR_DOC,
694
+ output_type=SequenceClassifierOutput,
695
+ config_class=_CONFIG_FOR_DOC,
696
+ )
697
+ def forward(
698
+ self,
699
+ input_ids: Optional[torch.LongTensor] = None,
700
+ attention_mask: Optional[torch.FloatTensor] = None,
701
+ position_ids: Optional[torch.LongTensor] = None,
702
+ head_mask: Optional[torch.FloatTensor] = None,
703
+ inputs_embeds: Optional[torch.FloatTensor] = None,
704
+ labels: Optional[torch.LongTensor] = None,
705
+ output_attentions: Optional[bool] = None,
706
+ output_hidden_states: Optional[bool] = None,
707
+ return_dict: Optional[bool] = None,
708
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
709
+ r"""
710
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
711
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
712
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
713
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
714
+ """
715
+
716
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
717
+
718
+ outputs = self.mpnet(
719
+ input_ids,
720
+ attention_mask=attention_mask,
721
+ position_ids=position_ids,
722
+ head_mask=head_mask,
723
+ inputs_embeds=inputs_embeds,
724
+ output_attentions=output_attentions,
725
+ output_hidden_states=output_hidden_states,
726
+ return_dict=return_dict,
727
+ )
728
+ sequence_output = outputs[0]
729
+ logits = self.classifier(sequence_output)
730
+
731
+ loss = None
732
+ if labels is not None:
733
+ if self.config.problem_type is None:
734
+ if self.num_labels == 1:
735
+ self.config.problem_type = "regression"
736
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
737
+ self.config.problem_type = "single_label_classification"
738
+ else:
739
+ self.config.problem_type = "multi_label_classification"
740
+
741
+ if self.config.problem_type == "regression":
742
+ loss_fct = MSELoss()
743
+ if self.num_labels == 1:
744
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
745
+ else:
746
+ loss = loss_fct(logits, labels)
747
+ elif self.config.problem_type == "single_label_classification":
748
+ loss_fct = CrossEntropyLoss()
749
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
750
+ elif self.config.problem_type == "multi_label_classification":
751
+ loss_fct = BCEWithLogitsLoss()
752
+ loss = loss_fct(logits, labels)
753
+ if not return_dict:
754
+ output = (logits,) + outputs[2:]
755
+ return ((loss,) + output) if loss is not None else output
756
+
757
+ return SequenceClassifierOutput(
758
+ loss=loss,
759
+ logits=logits,
760
+ hidden_states=outputs.hidden_states,
761
+ attentions=outputs.attentions,
762
+ )
763
+
764
+
765
+ @add_start_docstrings(
766
+ """
767
+ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
768
+ softmax) e.g. for RocStories/SWAG tasks.
769
+ """,
770
+ MPNET_START_DOCSTRING,
771
+ )
772
+ class MPNetForMultipleChoice(MPNetPreTrainedModel):
773
+ def __init__(self, config):
774
+ super().__init__(config)
775
+
776
+ self.mpnet = MPNetModel(config)
777
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
778
+ self.classifier = nn.Linear(config.hidden_size, 1)
779
+
780
+ # Initialize weights and apply final processing
781
+ self.post_init()
782
+
783
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
784
+ @add_code_sample_docstrings(
785
+ checkpoint=_CHECKPOINT_FOR_DOC,
786
+ output_type=MultipleChoiceModelOutput,
787
+ config_class=_CONFIG_FOR_DOC,
788
+ )
789
+ def forward(
790
+ self,
791
+ input_ids: Optional[torch.LongTensor] = None,
792
+ attention_mask: Optional[torch.FloatTensor] = None,
793
+ position_ids: Optional[torch.LongTensor] = None,
794
+ head_mask: Optional[torch.FloatTensor] = None,
795
+ inputs_embeds: Optional[torch.FloatTensor] = None,
796
+ labels: Optional[torch.LongTensor] = None,
797
+ output_attentions: Optional[bool] = None,
798
+ output_hidden_states: Optional[bool] = None,
799
+ return_dict: Optional[bool] = None,
800
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
801
+ r"""
802
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
803
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
804
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
805
+ `input_ids` above)
806
+ """
807
+
808
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
809
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
810
+
811
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
812
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
813
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
814
+ flat_inputs_embeds = (
815
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
816
+ if inputs_embeds is not None
817
+ else None
818
+ )
819
+
820
+ outputs = self.mpnet(
821
+ flat_input_ids,
822
+ position_ids=flat_position_ids,
823
+ attention_mask=flat_attention_mask,
824
+ head_mask=head_mask,
825
+ inputs_embeds=flat_inputs_embeds,
826
+ output_attentions=output_attentions,
827
+ output_hidden_states=output_hidden_states,
828
+ return_dict=return_dict,
829
+ )
830
+ pooled_output = outputs[1]
831
+
832
+ pooled_output = self.dropout(pooled_output)
833
+ logits = self.classifier(pooled_output)
834
+ reshaped_logits = logits.view(-1, num_choices)
835
+
836
+ loss = None
837
+ if labels is not None:
838
+ loss_fct = CrossEntropyLoss()
839
+ loss = loss_fct(reshaped_logits, labels)
840
+
841
+ if not return_dict:
842
+ output = (reshaped_logits,) + outputs[2:]
843
+ return ((loss,) + output) if loss is not None else output
844
+
845
+ return MultipleChoiceModelOutput(
846
+ loss=loss,
847
+ logits=reshaped_logits,
848
+ hidden_states=outputs.hidden_states,
849
+ attentions=outputs.attentions,
850
+ )
851
+
852
+
853
+ @add_start_docstrings(
854
+ """
855
+ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
856
+ Named-Entity-Recognition (NER) tasks.
857
+ """,
858
+ MPNET_START_DOCSTRING,
859
+ )
860
+ class MPNetForTokenClassification(MPNetPreTrainedModel):
861
+ def __init__(self, config):
862
+ super().__init__(config)
863
+ self.num_labels = config.num_labels
864
+
865
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
866
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
867
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
868
+
869
+ # Initialize weights and apply final processing
870
+ self.post_init()
871
+
872
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
873
+ @add_code_sample_docstrings(
874
+ checkpoint=_CHECKPOINT_FOR_DOC,
875
+ output_type=TokenClassifierOutput,
876
+ config_class=_CONFIG_FOR_DOC,
877
+ )
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.LongTensor] = None,
881
+ attention_mask: Optional[torch.FloatTensor] = None,
882
+ position_ids: Optional[torch.LongTensor] = None,
883
+ head_mask: Optional[torch.FloatTensor] = None,
884
+ inputs_embeds: Optional[torch.FloatTensor] = None,
885
+ labels: Optional[torch.LongTensor] = None,
886
+ output_attentions: Optional[bool] = None,
887
+ output_hidden_states: Optional[bool] = None,
888
+ return_dict: Optional[bool] = None,
889
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
890
+ r"""
891
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
892
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
893
+ """
894
+
895
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
896
+
897
+ outputs = self.mpnet(
898
+ input_ids,
899
+ attention_mask=attention_mask,
900
+ position_ids=position_ids,
901
+ head_mask=head_mask,
902
+ inputs_embeds=inputs_embeds,
903
+ output_attentions=output_attentions,
904
+ output_hidden_states=output_hidden_states,
905
+ return_dict=return_dict,
906
+ )
907
+
908
+ sequence_output = outputs[0]
909
+
910
+ sequence_output = self.dropout(sequence_output)
911
+ logits = self.classifier(sequence_output)
912
+
913
+ loss = None
914
+ if labels is not None:
915
+ loss_fct = CrossEntropyLoss()
916
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
917
+
918
+ if not return_dict:
919
+ output = (logits,) + outputs[2:]
920
+ return ((loss,) + output) if loss is not None else output
921
+
922
+ return TokenClassifierOutput(
923
+ loss=loss,
924
+ logits=logits,
925
+ hidden_states=outputs.hidden_states,
926
+ attentions=outputs.attentions,
927
+ )
928
+
929
+
930
+ class MPNetClassificationHead(nn.Module):
931
+ """Head for sentence-level classification tasks."""
932
+
933
+ def __init__(self, config):
934
+ super().__init__()
935
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
936
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
937
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
938
+
939
+ def forward(self, features, **kwargs):
940
+ x = features[:, 0, :] # take <s> token (equiv. to BERT's [CLS] token)
941
+ x = self.dropout(x)
942
+ x = self.dense(x)
943
+ x = torch.tanh(x)
944
+ x = self.dropout(x)
945
+ x = self.out_proj(x)
946
+ return x
947
+
948
+
949
+ @add_start_docstrings(
950
+ """
951
+ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
952
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
953
+ """,
954
+ MPNET_START_DOCSTRING,
955
+ )
956
+ class MPNetForQuestionAnswering(MPNetPreTrainedModel):
957
+ def __init__(self, config):
958
+ super().__init__(config)
959
+
960
+ self.num_labels = config.num_labels
961
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
962
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
963
+
964
+ # Initialize weights and apply final processing
965
+ self.post_init()
966
+
967
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
968
+ @add_code_sample_docstrings(
969
+ checkpoint=_CHECKPOINT_FOR_DOC,
970
+ output_type=QuestionAnsweringModelOutput,
971
+ config_class=_CONFIG_FOR_DOC,
972
+ )
973
+ def forward(
974
+ self,
975
+ input_ids: Optional[torch.LongTensor] = None,
976
+ attention_mask: Optional[torch.FloatTensor] = None,
977
+ position_ids: Optional[torch.LongTensor] = None,
978
+ head_mask: Optional[torch.FloatTensor] = None,
979
+ inputs_embeds: Optional[torch.FloatTensor] = None,
980
+ start_positions: Optional[torch.LongTensor] = None,
981
+ end_positions: Optional[torch.LongTensor] = None,
982
+ output_attentions: Optional[bool] = None,
983
+ output_hidden_states: Optional[bool] = None,
984
+ return_dict: Optional[bool] = None,
985
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
986
+ r"""
987
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
988
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
989
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
990
+ are not taken into account for computing the loss.
991
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
992
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
993
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
994
+ are not taken into account for computing the loss.
995
+ """
996
+
997
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
998
+
999
+ outputs = self.mpnet(
1000
+ input_ids,
1001
+ attention_mask=attention_mask,
1002
+ position_ids=position_ids,
1003
+ head_mask=head_mask,
1004
+ inputs_embeds=inputs_embeds,
1005
+ output_attentions=output_attentions,
1006
+ output_hidden_states=output_hidden_states,
1007
+ return_dict=return_dict,
1008
+ )
1009
+
1010
+ sequence_output = outputs[0]
1011
+
1012
+ logits = self.qa_outputs(sequence_output)
1013
+ start_logits, end_logits = logits.split(1, dim=-1)
1014
+ start_logits = start_logits.squeeze(-1).contiguous()
1015
+ end_logits = end_logits.squeeze(-1).contiguous()
1016
+
1017
+ total_loss = None
1018
+ if start_positions is not None and end_positions is not None:
1019
+ # If we are on multi-GPU, split add a dimension
1020
+ if len(start_positions.size()) > 1:
1021
+ start_positions = start_positions.squeeze(-1)
1022
+ if len(end_positions.size()) > 1:
1023
+ end_positions = end_positions.squeeze(-1)
1024
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1025
+ ignored_index = start_logits.size(1)
1026
+ start_positions = start_positions.clamp(0, ignored_index)
1027
+ end_positions = end_positions.clamp(0, ignored_index)
1028
+
1029
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1030
+ start_loss = loss_fct(start_logits, start_positions)
1031
+ end_loss = loss_fct(end_logits, end_positions)
1032
+ total_loss = (start_loss + end_loss) / 2
1033
+
1034
+ if not return_dict:
1035
+ output = (start_logits, end_logits) + outputs[2:]
1036
+ return ((total_loss,) + output) if total_loss is not None else output
1037
+
1038
+ return QuestionAnsweringModelOutput(
1039
+ loss=total_loss,
1040
+ start_logits=start_logits,
1041
+ end_logits=end_logits,
1042
+ hidden_states=outputs.hidden_states,
1043
+ attentions=outputs.attentions,
1044
+ )
1045
+
1046
+
1047
+ def create_position_ids_from_input_ids(input_ids, padding_idx):
1048
+ """
1049
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1050
+ are ignored. This is modified from fairseq's `utils.make_positions`. :param torch.Tensor x: :return torch.Tensor:
1051
+ """
1052
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1053
+ mask = input_ids.ne(padding_idx).int()
1054
+ incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
1055
+ return incremental_indices.long() + padding_idx
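Illustrative note (not part of the commit): a small sketch of the position-id scheme implemented by create_position_ids_from_input_ids above. Real tokens receive increasing positions starting at padding_idx + 1, while padding positions keep padding_idx; the token ids below are arbitrary placeholders.

    import torch

    padding_idx = 1
    input_ids = torch.tensor([[0, 5, 6, 2, 1, 1]])  # last two entries are padding

    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    print(incremental_indices.long() + padding_idx)  # tensor([[2, 3, 4, 5, 1, 1]])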
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/modeling_tf_mpnet.py ADDED
@@ -0,0 +1,1346 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ TF 2.0 MPNet model."""
17
+
18
+
19
+ from __future__ import annotations
20
+
21
+ import math
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFBaseModelOutputWithPooling,
32
+ TFMaskedLMOutput,
33
+ TFMultipleChoiceModelOutput,
34
+ TFQuestionAnsweringModelOutput,
35
+ TFSequenceClassifierOutput,
36
+ TFTokenClassifierOutput,
37
+ )
38
+ from ...modeling_tf_utils import (
39
+ TFMaskedLanguageModelingLoss,
40
+ TFModelInputType,
41
+ TFMultipleChoiceLoss,
42
+ TFPreTrainedModel,
43
+ TFQuestionAnsweringLoss,
44
+ TFSequenceClassificationLoss,
45
+ TFTokenClassificationLoss,
46
+ get_initializer,
47
+ keras,
48
+ keras_serializable,
49
+ unpack_inputs,
50
+ )
51
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
52
+ from ...utils import (
53
+ add_code_sample_docstrings,
54
+ add_start_docstrings,
55
+ add_start_docstrings_to_model_forward,
56
+ logging,
57
+ )
58
+ from .configuration_mpnet import MPNetConfig
59
+
60
+
61
+ logger = logging.get_logger(__name__)
62
+
63
+ _CHECKPOINT_FOR_DOC = "microsoft/mpnet-base"
64
+ _CONFIG_FOR_DOC = "MPNetConfig"
65
+
66
+ TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
67
+ "microsoft/mpnet-base",
68
+ ]
69
+
70
+
71
+ class TFMPNetPreTrainedModel(TFPreTrainedModel):
72
+ """
73
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
74
+ models.
75
+ """
76
+
77
+ config_class = MPNetConfig
78
+ base_model_prefix = "mpnet"
79
+
80
+
81
+ class TFMPNetEmbeddings(keras.layers.Layer):
82
+ """Construct the embeddings from word and position embeddings."""
83
+
84
+ def __init__(self, config, **kwargs):
85
+ super().__init__(**kwargs)
86
+
87
+ self.padding_idx = 1
88
+ self.config = config
89
+ self.hidden_size = config.hidden_size
90
+ self.max_position_embeddings = config.max_position_embeddings
91
+ self.initializer_range = config.initializer_range
92
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
93
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
94
+
95
+ def build(self, input_shape=None):
96
+ with tf.name_scope("word_embeddings"):
97
+ self.weight = self.add_weight(
98
+ name="weight",
99
+ shape=[self.config.vocab_size, self.hidden_size],
100
+ initializer=get_initializer(initializer_range=self.initializer_range),
101
+ )
102
+
103
+ with tf.name_scope("position_embeddings"):
104
+ self.position_embeddings = self.add_weight(
105
+ name="embeddings",
106
+ shape=[self.max_position_embeddings, self.hidden_size],
107
+ initializer=get_initializer(initializer_range=self.initializer_range),
108
+ )
109
+
110
+ if self.built:
111
+ return
112
+ self.built = True
113
+ if getattr(self, "LayerNorm", None) is not None:
114
+ with tf.name_scope(self.LayerNorm.name):
115
+ self.LayerNorm.build([None, None, self.config.hidden_size])
116
+
117
+ def create_position_ids_from_input_ids(self, input_ids):
118
+ """
119
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
120
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
121
+
122
+ Args:
123
+ input_ids: tf.Tensor
124
+ Returns: tf.Tensor
125
+ """
126
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
127
+ incremental_indices = tf.math.cumsum(mask, axis=1) * mask
128
+
129
+ return incremental_indices + self.padding_idx
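+ # Worked example (an illustrative sketch with made-up token ids, padding_idx = 1):
+ #   input_ids           = [[0, 8, 5, 2, 1, 1]]
+ #   mask                = [[1, 1, 1, 1, 0, 0]]
+ #   cumsum(mask) * mask = [[1, 2, 3, 4, 0, 0]]
+ #   + padding_idx       = [[2, 3, 4, 5, 1, 1]]
+ # Real tokens receive positions starting at padding_idx + 1, while pad tokens keep position padding_idx.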
130
+
131
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
132
+ """
133
+ Applies embedding based on inputs tensor.
134
+
135
+ Returns:
136
+ final_embeddings (`tf.Tensor`): output embedding tensor.
137
+ """
138
+ assert not (input_ids is None and inputs_embeds is None)
139
+
140
+ if input_ids is not None:
141
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
142
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
143
+
144
+ input_shape = shape_list(inputs_embeds)[:-1]
145
+
146
+ if position_ids is None:
147
+ if input_ids is not None:
148
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
149
+ position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)
150
+ else:
151
+ position_ids = tf.expand_dims(
152
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
153
+ )
154
+
155
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
156
+ final_embeddings = inputs_embeds + position_embeds
157
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
158
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
159
+
160
+ return final_embeddings
161
+
162
+
163
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->MPNet
164
+ class TFMPNetPooler(keras.layers.Layer):
165
+ def __init__(self, config: MPNetConfig, **kwargs):
166
+ super().__init__(**kwargs)
167
+
168
+ self.dense = keras.layers.Dense(
169
+ units=config.hidden_size,
170
+ kernel_initializer=get_initializer(config.initializer_range),
171
+ activation="tanh",
172
+ name="dense",
173
+ )
174
+ self.config = config
175
+
176
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
177
+ # We "pool" the model by simply taking the hidden state corresponding
178
+ # to the first token.
179
+ first_token_tensor = hidden_states[:, 0]
180
+ pooled_output = self.dense(inputs=first_token_tensor)
181
+
182
+ return pooled_output
183
+
184
+ def build(self, input_shape=None):
185
+ if self.built:
186
+ return
187
+ self.built = True
188
+ if getattr(self, "dense", None) is not None:
189
+ with tf.name_scope(self.dense.name):
190
+ self.dense.build([None, None, self.config.hidden_size])
191
+
192
+
193
+ class TFMPNetSelfAttention(keras.layers.Layer):
194
+ def __init__(self, config, **kwargs):
195
+ super().__init__(**kwargs)
196
+
197
+ if config.hidden_size % config.num_attention_heads != 0:
198
+ raise ValueError(
199
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
200
+ f"heads ({config.num_attention_heads})"
201
+ )
202
+
203
+ self.num_attention_heads = config.num_attention_heads
204
+ assert config.hidden_size % config.num_attention_heads == 0
205
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
206
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
207
+
208
+ self.q = keras.layers.Dense(
209
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="q"
210
+ )
211
+ self.k = keras.layers.Dense(
212
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="k"
213
+ )
214
+ self.v = keras.layers.Dense(
215
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="v"
216
+ )
217
+ self.o = keras.layers.Dense(
218
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="o"
219
+ )
220
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
221
+ self.config = config
222
+
223
+ def transpose_for_scores(self, x, batch_size):
224
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
225
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
226
+
227
+ return tf.transpose(x, perm=[0, 2, 1, 3])
228
+
229
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
230
+ batch_size = shape_list(hidden_states)[0]
231
+
232
+ q = self.q(hidden_states)
233
+ k = self.k(hidden_states)
234
+ v = self.v(hidden_states)
235
+
236
+ q = self.transpose_for_scores(q, batch_size)
237
+ k = self.transpose_for_scores(k, batch_size)
238
+ v = self.transpose_for_scores(v, batch_size)
239
+
240
+ attention_scores = tf.matmul(q, k, transpose_b=True)
241
+ dk = tf.cast(shape_list(k)[-1], attention_scores.dtype)
242
+ attention_scores = attention_scores / tf.math.sqrt(dk)
243
+
244
+ # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
245
+ if position_bias is not None:
246
+ attention_scores += position_bias
247
+
248
+ if attention_mask is not None:
249
+ attention_scores = attention_scores + attention_mask
250
+
251
+ attention_probs = stable_softmax(attention_scores, axis=-1)
252
+
253
+ attention_probs = self.dropout(attention_probs, training=training)
254
+
255
+ if head_mask is not None:
256
+ attention_probs = attention_probs * head_mask
257
+
258
+ c = tf.matmul(attention_probs, v)
259
+ c = tf.transpose(c, perm=[0, 2, 1, 3])
260
+ c = tf.reshape(c, (batch_size, -1, self.all_head_size))
261
+ o = self.o(c)
262
+
263
+ outputs = (o, attention_probs) if output_attentions else (o,)
264
+ return outputs
265
+
266
+ def build(self, input_shape=None):
267
+ if self.built:
268
+ return
269
+ self.built = True
270
+ if getattr(self, "q", None) is not None:
271
+ with tf.name_scope(self.q.name):
272
+ self.q.build([None, None, self.config.hidden_size])
273
+ if getattr(self, "k", None) is not None:
274
+ with tf.name_scope(self.k.name):
275
+ self.k.build([None, None, self.config.hidden_size])
276
+ if getattr(self, "v", None) is not None:
277
+ with tf.name_scope(self.v.name):
278
+ self.v.build([None, None, self.config.hidden_size])
279
+ if getattr(self, "o", None) is not None:
280
+ with tf.name_scope(self.o.name):
281
+ self.o.build([None, None, self.config.hidden_size])
282
+
283
+
284
+ class TFMPNetAttention(keras.layers.Layer):
285
+ def __init__(self, config, **kwargs):
286
+ super().__init__(**kwargs)
287
+
288
+ self.attn = TFMPNetSelfAttention(config, name="attn")
289
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
290
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
291
+ self.config = config
292
+
293
+ def prune_heads(self, heads):
294
+ raise NotImplementedError
295
+
296
+ def call(self, input_tensor, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
297
+ self_outputs = self.attn(
298
+ input_tensor, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
299
+ )
300
+ attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + input_tensor)
301
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
302
+ return outputs
303
+
304
+ def build(self, input_shape=None):
305
+ if self.built:
306
+ return
307
+ self.built = True
308
+ if getattr(self, "attn", None) is not None:
309
+ with tf.name_scope(self.attn.name):
310
+ self.attn.build(None)
311
+ if getattr(self, "LayerNorm", None) is not None:
312
+ with tf.name_scope(self.LayerNorm.name):
313
+ self.LayerNorm.build([None, None, self.config.hidden_size])
314
+
315
+
316
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->MPNet
317
+ class TFMPNetIntermediate(keras.layers.Layer):
318
+ def __init__(self, config: MPNetConfig, **kwargs):
319
+ super().__init__(**kwargs)
320
+
321
+ self.dense = keras.layers.Dense(
322
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
323
+ )
324
+
325
+ if isinstance(config.hidden_act, str):
326
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
327
+ else:
328
+ self.intermediate_act_fn = config.hidden_act
329
+ self.config = config
330
+
331
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
332
+ hidden_states = self.dense(inputs=hidden_states)
333
+ hidden_states = self.intermediate_act_fn(hidden_states)
334
+
335
+ return hidden_states
336
+
337
+ def build(self, input_shape=None):
338
+ if self.built:
339
+ return
340
+ self.built = True
341
+ if getattr(self, "dense", None) is not None:
342
+ with tf.name_scope(self.dense.name):
343
+ self.dense.build([None, None, self.config.hidden_size])
344
+
345
+
346
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->MPNet
347
+ class TFMPNetOutput(keras.layers.Layer):
348
+ def __init__(self, config: MPNetConfig, **kwargs):
349
+ super().__init__(**kwargs)
350
+
351
+ self.dense = keras.layers.Dense(
352
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
353
+ )
354
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
355
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
356
+ self.config = config
357
+
358
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
359
+ hidden_states = self.dense(inputs=hidden_states)
360
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
361
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
362
+
363
+ return hidden_states
364
+
365
+ def build(self, input_shape=None):
366
+ if self.built:
367
+ return
368
+ self.built = True
369
+ if getattr(self, "dense", None) is not None:
370
+ with tf.name_scope(self.dense.name):
371
+ self.dense.build([None, None, self.config.intermediate_size])
372
+ if getattr(self, "LayerNorm", None) is not None:
373
+ with tf.name_scope(self.LayerNorm.name):
374
+ self.LayerNorm.build([None, None, self.config.hidden_size])
375
+
376
+
377
+ class TFMPNetLayer(keras.layers.Layer):
378
+ def __init__(self, config, **kwargs):
379
+ super().__init__(**kwargs)
380
+
381
+ self.attention = TFMPNetAttention(config, name="attention")
382
+ self.intermediate = TFMPNetIntermediate(config, name="intermediate")
383
+ self.out = TFMPNetOutput(config, name="output")
384
+
385
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
386
+ self_attention_outputs = self.attention(
387
+ hidden_states, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
388
+ )
389
+ attention_output = self_attention_outputs[0]
390
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
391
+
392
+ intermediate_output = self.intermediate(attention_output)
393
+ layer_output = self.out(intermediate_output, attention_output, training=training)
394
+ outputs = (layer_output,) + outputs # add attentions if we output them
395
+
396
+ return outputs
397
+
398
+ def build(self, input_shape=None):
399
+ if self.built:
400
+ return
401
+ self.built = True
402
+ if getattr(self, "attention", None) is not None:
403
+ with tf.name_scope(self.attention.name):
404
+ self.attention.build(None)
405
+ if getattr(self, "intermediate", None) is not None:
406
+ with tf.name_scope(self.intermediate.name):
407
+ self.intermediate.build(None)
408
+ if getattr(self, "out", None) is not None:
409
+ with tf.name_scope(self.out.name):
410
+ self.out.build(None)
411
+
412
+
413
+ class TFMPNetEncoder(keras.layers.Layer):
414
+ def __init__(self, config, **kwargs):
415
+ super().__init__(**kwargs)
416
+
417
+ self.config = config
418
+ self.n_heads = config.num_attention_heads
419
+ self.output_attentions = config.output_attentions
420
+ self.output_hidden_states = config.output_hidden_states
421
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
422
+ self.initializer_range = config.initializer_range
423
+
424
+ self.layer = [TFMPNetLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
425
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
426
+
427
+ def build(self, input_shape=None):
428
+ if self.built:
429
+ return
430
+ self.built = True
431
+ with tf.name_scope("relative_attention_bias"):
432
+ self.relative_attention_bias = self.add_weight(
433
+ name="embeddings",
434
+ shape=[self.relative_attention_num_buckets, self.n_heads],
435
+ initializer=get_initializer(self.initializer_range),
436
+ )
437
+ if getattr(self, "layer", None) is not None:
438
+ for layer in self.layer:
439
+ with tf.name_scope(layer.name):
440
+ layer.build(None)
441
+
442
+ def call(
443
+ self,
444
+ hidden_states,
445
+ attention_mask,
446
+ head_mask,
447
+ output_attentions,
448
+ output_hidden_states,
449
+ return_dict,
450
+ training=False,
451
+ ):
452
+ position_bias = self.compute_position_bias(hidden_states)
453
+ all_hidden_states = () if output_hidden_states else None
454
+ all_attentions = () if output_attentions else None
455
+
456
+ for i, layer_module in enumerate(self.layer):
457
+ if output_hidden_states:
458
+ all_hidden_states = all_hidden_states + (hidden_states,)
459
+
460
+ layer_outputs = layer_module(
461
+ hidden_states,
462
+ attention_mask,
463
+ head_mask[i],
464
+ output_attentions,
465
+ position_bias=position_bias,
466
+ training=training,
467
+ )
468
+ hidden_states = layer_outputs[0]
469
+
470
+ if output_attentions:
471
+ all_attentions = all_attentions + (layer_outputs[1],)
472
+
473
+ # Add last layer
474
+ if output_hidden_states:
475
+ all_hidden_states = all_hidden_states + (hidden_states,)
476
+
477
+ if not return_dict:
478
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
479
+
480
+ return TFBaseModelOutput(
481
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
482
+ )
483
+
484
+ @staticmethod
485
+ def _relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
486
+ ret = 0
487
+ n = -relative_position
488
+
489
+ num_buckets //= 2
490
+ ret += tf.cast(tf.math.less(n, 0), dtype=relative_position.dtype) * num_buckets
491
+ n = tf.math.abs(n)
492
+
493
+ # now n is in the range [0, inf)
494
+ max_exact = num_buckets // 2
495
+ is_small = tf.math.less(n, max_exact)
496
+
497
+ val_if_large = max_exact + tf.cast(
498
+ tf.math.log(n / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact),
499
+ dtype=relative_position.dtype,
500
+ )
501
+
502
+ val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
503
+ ret += tf.where(is_small, n, val_if_large)
504
+ return ret
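+ # Worked example (an illustrative sketch assuming num_buckets=32, i.e. 16 buckets per direction, max_exact=8):
+ #   relative_position = -3 (key 3 positions to the left)  -> n = 3, small branch, bucket 3
+ #   relative_position = +3 (key 3 positions to the right) -> n = -3 adds the 16 offset, bucket 16 + 3 = 19
+ # Distances of max_exact or more fall into logarithmically spaced buckets, capped at 15 within each direction.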
505
+
506
+ def compute_position_bias(self, x, position_ids=None):
507
+ """Compute binned relative position bias"""
508
+ input_shape = shape_list(x)
509
+ qlen, klen = input_shape[1], input_shape[1]
510
+
511
+ if position_ids is not None:
512
+ context_position = position_ids[:, :, None]
513
+ memory_position = position_ids[:, None, :]
514
+ else:
515
+ context_position = tf.range(qlen)[:, None]
516
+ memory_position = tf.range(klen)[None, :]
517
+
518
+ relative_position = memory_position - context_position # shape (qlen, klen)
519
+
520
+ rp_bucket = self._relative_position_bucket(
521
+ relative_position,
522
+ num_buckets=self.relative_attention_num_buckets,
523
+ )
524
+ values = tf.gather(self.relative_attention_bias, rp_bucket) # shape (qlen, klen, num_heads)
525
+ values = tf.expand_dims(tf.transpose(values, [2, 0, 1]), axis=0) # shape (1, num_heads, qlen, klen)
526
+ return values
527
+
528
+
529
+ @keras_serializable
530
+ class TFMPNetMainLayer(keras.layers.Layer):
531
+ config_class = MPNetConfig
532
+
533
+ def __init__(self, config, **kwargs):
534
+ super().__init__(**kwargs)
535
+
536
+ self.config = config
537
+ self.num_hidden_layers = config.num_hidden_layers
538
+ self.initializer_range = config.initializer_range
539
+ self.output_attentions = config.output_attentions
540
+ self.output_hidden_states = config.output_hidden_states
541
+ self.return_dict = config.use_return_dict
542
+ self.encoder = TFMPNetEncoder(config, name="encoder")
543
+ self.pooler = TFMPNetPooler(config, name="pooler")
544
+ # The embeddings must be the last declaration in order to follow the weights order
545
+ self.embeddings = TFMPNetEmbeddings(config, name="embeddings")
546
+
547
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
548
+ def get_input_embeddings(self) -> keras.layers.Layer:
549
+ return self.embeddings
550
+
551
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
552
+ def set_input_embeddings(self, value: tf.Variable):
553
+ self.embeddings.weight = value
554
+ self.embeddings.vocab_size = shape_list(value)[0]
555
+
556
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
557
+ def _prune_heads(self, heads_to_prune):
558
+ """
559
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
560
+ class PreTrainedModel
561
+ """
562
+ raise NotImplementedError
563
+
564
+ @unpack_inputs
565
+ def call(
566
+ self,
567
+ input_ids=None,
568
+ attention_mask=None,
569
+ position_ids=None,
570
+ head_mask=None,
571
+ inputs_embeds=None,
572
+ output_attentions=None,
573
+ output_hidden_states=None,
574
+ return_dict=None,
575
+ training=False,
576
+ ):
577
+ if input_ids is not None and inputs_embeds is not None:
578
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
579
+ elif input_ids is not None:
580
+ input_shape = shape_list(input_ids)
581
+ elif inputs_embeds is not None:
582
+ input_shape = shape_list(inputs_embeds)[:-1]
583
+ else:
584
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
585
+
586
+ if attention_mask is None:
587
+ attention_mask = tf.fill(input_shape, 1)
588
+
589
+ embedding_output = self.embeddings(
590
+ input_ids,
591
+ position_ids,
592
+ inputs_embeds,
593
+ training=training,
594
+ )
595
+
596
+ # We create a 3D attention mask from a 2D tensor mask.
597
+ # Sizes are [batch_size, 1, 1, to_seq_length]
598
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
599
+ # this attention mask is simpler than the triangular masking of causal attention
600
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
601
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
602
+
603
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
604
+ # masked positions, this operation will create a tensor which is 0.0 for
605
+ # positions we want to attend and -10000.0 for masked positions.
606
+ # Since we are adding it to the raw scores before the softmax, this is
607
+ # effectively the same as removing these entirely.
608
+ extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
609
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
610
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
611
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
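+ # Illustrative sketch: an attention_mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] here, leaving attended
+ # positions unchanged and pushing padded positions towards -inf before the softmax.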
612
+
613
+ # Prepare head mask if needed
614
+ # 1.0 in head_mask indicate we keep the head
615
+ # attention_probs has shape bsz x n_heads x N x N
616
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
617
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
618
+ if head_mask is not None:
619
+ raise NotImplementedError
620
+ else:
621
+ head_mask = [None] * self.num_hidden_layers
622
+
623
+ encoder_outputs = self.encoder(
624
+ embedding_output,
625
+ extended_attention_mask,
626
+ head_mask,
627
+ output_attentions,
628
+ output_hidden_states,
629
+ return_dict,
630
+ training=training,
631
+ )
632
+
633
+ sequence_output = encoder_outputs[0]
634
+ pooled_output = self.pooler(sequence_output)
635
+
636
+ if not return_dict:
637
+ return (
638
+ sequence_output,
639
+ pooled_output,
640
+ ) + encoder_outputs[1:]
641
+
642
+ return TFBaseModelOutputWithPooling(
643
+ last_hidden_state=sequence_output,
644
+ pooler_output=pooled_output,
645
+ hidden_states=encoder_outputs.hidden_states,
646
+ attentions=encoder_outputs.attentions,
647
+ )
648
+
649
+ def build(self, input_shape=None):
650
+ if self.built:
651
+ return
652
+ self.built = True
653
+ if getattr(self, "encoder", None) is not None:
654
+ with tf.name_scope(self.encoder.name):
655
+ self.encoder.build(None)
656
+ if getattr(self, "pooler", None) is not None:
657
+ with tf.name_scope(self.pooler.name):
658
+ self.pooler.build(None)
659
+ if getattr(self, "embeddings", None) is not None:
660
+ with tf.name_scope(self.embeddings.name):
661
+ self.embeddings.build(None)
662
+
663
+
664
+ MPNET_START_DOCSTRING = r"""
665
+
666
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
667
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
668
+ etc.)
669
+
670
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
671
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
672
+ behavior.
673
+
674
+ <Tip>
675
+
676
+ TensorFlow models and layers in `transformers` accept two formats as input:
677
+
678
+ - having all inputs as keyword arguments (like PyTorch models), or
679
+ - having all inputs as a list, tuple or dict in the first positional argument.
680
+
681
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
682
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
683
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
684
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
685
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
686
+ positional argument:
687
+
688
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
689
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
690
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
691
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
692
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
693
+
694
+ Note that when creating models and layers with
695
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
696
+ about any of this, as you can just pass inputs like you would to any other Python function!
697
+
698
+ </Tip>
699
+
700
+ Args:
701
+ config ([`MPNetConfig`]): Model configuration class with all the parameters of the model.
702
+ Initializing with a config file does not load the weights associated with the model, only the
703
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
704
+ """
705
+
706
+ MPNET_INPUTS_DOCSTRING = r"""
707
+ Args:
708
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
709
+ Indices of input sequence tokens in the vocabulary.
710
+
711
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
712
+ [`PreTrainedTokenizer.encode`] for details.
713
+
714
+ [What are input IDs?](../glossary#input-ids)
715
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
716
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
717
+
718
+ - 1 for tokens that are **not masked**,
719
+ - 0 for tokens that are **masked**.
720
+
721
+ [What are attention masks?](../glossary#attention-mask)
722
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
723
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
724
+ config.max_position_embeddings - 1]`.
725
+
726
+ [What are position IDs?](../glossary#position-ids)
727
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
728
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
729
+
730
+ - 1 indicates the head is **not masked**,
731
+ - 0 indicates the head is **masked**.
732
+
733
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
734
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
735
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
736
+ model's internal embedding lookup matrix.
737
+ output_attentions (`bool`, *optional*):
738
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
739
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
740
+ config will be used instead.
741
+ output_hidden_states (`bool`, *optional*):
742
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
743
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
744
+ used instead.
745
+ return_dict (`bool`, *optional*):
746
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
747
+ eager mode, in graph mode the value will always be set to True.
748
+ training (`bool`, *optional*, defaults to `False`):
749
+ Whether or not to use the model in training mode (some modules like dropout modules have different
750
+ behaviors between training and evaluation).
751
+ """
752
+
753
+
754
+ @add_start_docstrings(
755
+ "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.",
756
+ MPNET_START_DOCSTRING,
757
+ )
758
+ class TFMPNetModel(TFMPNetPreTrainedModel):
759
+ def __init__(self, config, *inputs, **kwargs):
760
+ super().__init__(config, *inputs, **kwargs)
761
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
762
+
763
+ @unpack_inputs
764
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
765
+ @add_code_sample_docstrings(
766
+ checkpoint=_CHECKPOINT_FOR_DOC,
767
+ output_type=TFBaseModelOutput,
768
+ config_class=_CONFIG_FOR_DOC,
769
+ )
770
+ def call(
771
+ self,
772
+ input_ids: TFModelInputType | None = None,
773
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
774
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
775
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
776
+ inputs_embeds: tf.Tensor | None = None,
777
+ output_attentions: Optional[bool] = None,
778
+ output_hidden_states: Optional[bool] = None,
779
+ return_dict: Optional[bool] = None,
780
+ training: bool = False,
781
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
782
+ outputs = self.mpnet(
783
+ input_ids=input_ids,
784
+ attention_mask=attention_mask,
785
+ position_ids=position_ids,
786
+ head_mask=head_mask,
787
+ inputs_embeds=inputs_embeds,
788
+ output_attentions=output_attentions,
789
+ output_hidden_states=output_hidden_states,
790
+ return_dict=return_dict,
791
+ training=training,
792
+ )
793
+ return outputs
794
+
795
+ def build(self, input_shape=None):
796
+ if self.built:
797
+ return
798
+ self.built = True
799
+ if getattr(self, "mpnet", None) is not None:
800
+ with tf.name_scope(self.mpnet.name):
801
+ self.mpnet.build(None)
802
+
803
+
804
+ class TFMPNetLMHead(keras.layers.Layer):
805
+ """MPNet head for masked and permuted language modeling"""
806
+
807
+ def __init__(self, config, input_embeddings, **kwargs):
808
+ super().__init__(**kwargs)
809
+
810
+ self.config = config
811
+ self.hidden_size = config.hidden_size
812
+ self.dense = keras.layers.Dense(
813
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
814
+ )
815
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
816
+ self.act = get_tf_activation("gelu")
817
+
818
+ # The output weights are the same as the input embeddings, but there is
819
+ # an output-only bias for each token.
820
+ self.decoder = input_embeddings
821
+
822
+ def build(self, input_shape=None):
823
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
824
+
825
+ if self.built:
826
+ return
827
+ self.built = True
828
+ if getattr(self, "dense", None) is not None:
829
+ with tf.name_scope(self.dense.name):
830
+ self.dense.build([None, None, self.config.hidden_size])
831
+ if getattr(self, "layer_norm", None) is not None:
832
+ with tf.name_scope(self.layer_norm.name):
833
+ self.layer_norm.build([None, None, self.config.hidden_size])
834
+
835
+ def get_output_embeddings(self):
836
+ return self.decoder
837
+
838
+ def set_output_embeddings(self, value):
839
+ self.decoder.weight = value
840
+ self.decoder.vocab_size = shape_list(value)[0]
841
+
842
+ def get_bias(self):
843
+ return {"bias": self.bias}
844
+
845
+ def set_bias(self, value):
846
+ self.bias = value["bias"]
847
+ self.config.vocab_size = shape_list(value["bias"])[0]
848
+
849
+ def call(self, hidden_states):
850
+ hidden_states = self.dense(hidden_states)
851
+ hidden_states = self.act(hidden_states)
852
+ hidden_states = self.layer_norm(hidden_states)
853
+
854
+ # project back to size of vocabulary with bias
855
+ seq_length = shape_list(tensor=hidden_states)[1]
856
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
857
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
858
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
859
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
860
+
861
+ return hidden_states
862
+
863
+
864
+ @add_start_docstrings("""MPNet Model with a `language modeling` head on top.""", MPNET_START_DOCSTRING)
865
+ class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss):
866
+ _keys_to_ignore_on_load_missing = [r"pooler"]
867
+
868
+ def __init__(self, config, *inputs, **kwargs):
869
+ super().__init__(config, *inputs, **kwargs)
870
+
871
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
872
+ self.lm_head = TFMPNetLMHead(config, self.mpnet.embeddings, name="lm_head")
873
+
874
+ def get_lm_head(self):
875
+ return self.lm_head
876
+
877
+ def get_prefix_bias_name(self):
878
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
879
+ return self.name + "/" + self.lm_head.name
880
+
881
+ @unpack_inputs
882
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
883
+ @add_code_sample_docstrings(
884
+ checkpoint=_CHECKPOINT_FOR_DOC,
885
+ output_type=TFMaskedLMOutput,
886
+ config_class=_CONFIG_FOR_DOC,
887
+ )
888
+ def call(
889
+ self,
890
+ input_ids: TFModelInputType | None = None,
891
+ attention_mask: np.ndarray | tf.Tensor | None = None,
892
+ position_ids: np.ndarray | tf.Tensor | None = None,
893
+ head_mask: np.ndarray | tf.Tensor | None = None,
894
+ inputs_embeds: tf.Tensor | None = None,
895
+ output_attentions: Optional[bool] = None,
896
+ output_hidden_states: Optional[bool] = None,
897
+ return_dict: Optional[bool] = None,
898
+ labels: tf.Tensor | None = None,
899
+ training: bool = False,
900
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
901
+ r"""
902
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
903
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
904
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
905
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
906
+ """
907
+ outputs = self.mpnet(
908
+ input_ids,
909
+ attention_mask=attention_mask,
910
+ position_ids=position_ids,
911
+ head_mask=head_mask,
912
+ inputs_embeds=inputs_embeds,
913
+ output_attentions=output_attentions,
914
+ output_hidden_states=output_hidden_states,
915
+ return_dict=return_dict,
916
+ training=training,
917
+ )
918
+ sequence_output = outputs[0]
919
+ prediction_scores = self.lm_head(sequence_output)
920
+
921
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
922
+
923
+ if not return_dict:
924
+ output = (prediction_scores,) + outputs[2:]
925
+ return ((loss,) + output) if loss is not None else output
926
+
927
+ return TFMaskedLMOutput(
928
+ loss=loss,
929
+ logits=prediction_scores,
930
+ hidden_states=outputs.hidden_states,
931
+ attentions=outputs.attentions,
932
+ )
933
+
934
+ def build(self, input_shape=None):
935
+ if self.built:
936
+ return
937
+ self.built = True
938
+ if getattr(self, "mpnet", None) is not None:
939
+ with tf.name_scope(self.mpnet.name):
940
+ self.mpnet.build(None)
941
+ if getattr(self, "lm_head", None) is not None:
942
+ with tf.name_scope(self.lm_head.name):
943
+ self.lm_head.build(None)
944
+
945
+
946
+ class TFMPNetClassificationHead(keras.layers.Layer):
947
+ """Head for sentence-level classification tasks."""
948
+
949
+ def __init__(self, config, **kwargs):
950
+ super().__init__(**kwargs)
951
+ self.dense = keras.layers.Dense(
952
+ config.hidden_size,
953
+ kernel_initializer=get_initializer(config.initializer_range),
954
+ activation="tanh",
955
+ name="dense",
956
+ )
957
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
958
+ self.out_proj = keras.layers.Dense(
959
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
960
+ )
961
+ self.config = config
962
+
963
+ def call(self, features, training=False):
964
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
965
+ x = self.dropout(x, training=training)
966
+ x = self.dense(x)
967
+ x = self.dropout(x, training=training)
968
+ x = self.out_proj(x)
969
+ return x
970
+
971
+ def build(self, input_shape=None):
972
+ if self.built:
973
+ return
974
+ self.built = True
975
+ if getattr(self, "dense", None) is not None:
976
+ with tf.name_scope(self.dense.name):
977
+ self.dense.build([None, None, self.config.hidden_size])
978
+ if getattr(self, "out_proj", None) is not None:
979
+ with tf.name_scope(self.out_proj.name):
980
+ self.out_proj.build([None, None, self.config.hidden_size])
981
+
982
+
983
+ @add_start_docstrings(
984
+ """
985
+ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
986
+ output) e.g. for GLUE tasks.
987
+ """,
988
+ MPNET_START_DOCSTRING,
989
+ )
990
+ class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassificationLoss):
991
+ _keys_to_ignore_on_load_missing = [r"pooler"]
992
+
993
+ def __init__(self, config, *inputs, **kwargs):
994
+ super().__init__(config, *inputs, **kwargs)
995
+ self.num_labels = config.num_labels
996
+
997
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
998
+ self.classifier = TFMPNetClassificationHead(config, name="classifier")
999
+
1000
+ @unpack_inputs
1001
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1002
+ @add_code_sample_docstrings(
1003
+ checkpoint=_CHECKPOINT_FOR_DOC,
1004
+ output_type=TFSequenceClassifierOutput,
1005
+ config_class=_CONFIG_FOR_DOC,
1006
+ )
1007
+ def call(
1008
+ self,
1009
+ input_ids: TFModelInputType | None = None,
1010
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
1011
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
1012
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
1013
+ inputs_embeds: tf.Tensor | None = None,
1014
+ output_attentions: Optional[bool] = None,
1015
+ output_hidden_states: Optional[bool] = None,
1016
+ return_dict: Optional[bool] = None,
1017
+ labels: tf.Tensor | None = None,
1018
+ training: bool = False,
1019
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1020
+ r"""
1021
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1022
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1023
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1024
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1025
+ """
1026
+ outputs = self.mpnet(
1027
+ input_ids,
1028
+ attention_mask=attention_mask,
1029
+ position_ids=position_ids,
1030
+ head_mask=head_mask,
1031
+ inputs_embeds=inputs_embeds,
1032
+ output_attentions=output_attentions,
1033
+ output_hidden_states=output_hidden_states,
1034
+ return_dict=return_dict,
1035
+ training=training,
1036
+ )
1037
+
1038
+ sequence_output = outputs[0]
1039
+ logits = self.classifier(sequence_output, training=training)
1040
+
1041
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1042
+
1043
+ if not return_dict:
1044
+ output = (logits,) + outputs[2:]
1045
+ return ((loss,) + output) if loss is not None else output
1046
+
1047
+ return TFSequenceClassifierOutput(
1048
+ loss=loss,
1049
+ logits=logits,
1050
+ hidden_states=outputs.hidden_states,
1051
+ attentions=outputs.attentions,
1052
+ )
1053
+
1054
+ def build(self, input_shape=None):
1055
+ if self.built:
1056
+ return
1057
+ self.built = True
1058
+ if getattr(self, "mpnet", None) is not None:
1059
+ with tf.name_scope(self.mpnet.name):
1060
+ self.mpnet.build(None)
1061
+ if getattr(self, "classifier", None) is not None:
1062
+ with tf.name_scope(self.classifier.name):
1063
+ self.classifier.build(None)
1064
+
1065
+
1066
+ @add_start_docstrings(
1067
+ """
1068
+ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1069
+ softmax) e.g. for RocStories/SWAG tasks.
1070
+ """,
1071
+ MPNET_START_DOCSTRING,
1072
+ )
1073
+ class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss):
1074
+ def __init__(self, config, *inputs, **kwargs):
1075
+ super().__init__(config, *inputs, **kwargs)
1076
+
1077
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
1078
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1079
+ self.classifier = keras.layers.Dense(
1080
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1081
+ )
1082
+ self.config = config
1083
+
1084
+ @unpack_inputs
1085
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1086
+ @add_code_sample_docstrings(
1087
+ checkpoint=_CHECKPOINT_FOR_DOC,
1088
+ output_type=TFMultipleChoiceModelOutput,
1089
+ config_class=_CONFIG_FOR_DOC,
1090
+ )
1091
+ def call(
1092
+ self,
1093
+ input_ids: TFModelInputType | None = None,
1094
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1095
+ position_ids: np.ndarray | tf.Tensor | None = None,
1096
+ head_mask: np.ndarray | tf.Tensor | None = None,
1097
+ inputs_embeds: tf.Tensor | None = None,
1098
+ output_attentions: Optional[bool] = None,
1099
+ output_hidden_states: Optional[bool] = None,
1100
+ return_dict: Optional[bool] = None,
1101
+ labels: tf.Tensor | None = None,
1102
+ training: bool = False,
1103
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
1104
+ r"""
1105
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1106
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1107
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1108
+ """
1109
+ if input_ids is not None:
1110
+ num_choices = shape_list(input_ids)[1]
1111
+ seq_length = shape_list(input_ids)[2]
1112
+ else:
1113
+ num_choices = shape_list(inputs_embeds)[1]
1114
+ seq_length = shape_list(inputs_embeds)[2]
1115
+
1116
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1117
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1118
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1119
+ flat_inputs_embeds = (
1120
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1121
+ if inputs_embeds is not None
1122
+ else None
1123
+ )
1124
+ outputs = self.mpnet(
1125
+ flat_input_ids,
1126
+ flat_attention_mask,
1127
+ flat_position_ids,
1128
+ head_mask,
1129
+ flat_inputs_embeds,
1130
+ output_attentions,
1131
+ output_hidden_states,
1132
+ return_dict=return_dict,
1133
+ training=training,
1134
+ )
1135
+ pooled_output = outputs[1]
1136
+ pooled_output = self.dropout(pooled_output, training=training)
1137
+ logits = self.classifier(pooled_output)
1138
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1139
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1140
+
1141
+ if not return_dict:
1142
+ output = (reshaped_logits,) + outputs[2:]
1143
+ return ((loss,) + output) if loss is not None else output
1144
+
1145
+ return TFMultipleChoiceModelOutput(
1146
+ loss=loss,
1147
+ logits=reshaped_logits,
1148
+ hidden_states=outputs.hidden_states,
1149
+ attentions=outputs.attentions,
1150
+ )
1151
+
1152
+ def build(self, input_shape=None):
1153
+ if self.built:
1154
+ return
1155
+ self.built = True
1156
+ if getattr(self, "mpnet", None) is not None:
1157
+ with tf.name_scope(self.mpnet.name):
1158
+ self.mpnet.build(None)
1159
+ if getattr(self, "classifier", None) is not None:
1160
+ with tf.name_scope(self.classifier.name):
1161
+ self.classifier.build([None, None, self.config.hidden_size])
1162
+
1163
+
1164
+ @add_start_docstrings(
1165
+ """
1166
+ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1167
+ Named-Entity-Recognition (NER) tasks.
1168
+ """,
1169
+ MPNET_START_DOCSTRING,
1170
+ )
1171
+ class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificationLoss):
1172
+ _keys_to_ignore_on_load_missing = [r"pooler"]
1173
+
1174
+ def __init__(self, config, *inputs, **kwargs):
1175
+ super().__init__(config, *inputs, **kwargs)
1176
+
1177
+ self.num_labels = config.num_labels
1178
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
1179
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1180
+ self.classifier = keras.layers.Dense(
1181
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1182
+ )
1183
+ self.config = config
1184
+
1185
+ @unpack_inputs
1186
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1187
+ @add_code_sample_docstrings(
1188
+ checkpoint=_CHECKPOINT_FOR_DOC,
1189
+ output_type=TFTokenClassifierOutput,
1190
+ config_class=_CONFIG_FOR_DOC,
1191
+ )
1192
+ def call(
1193
+ self,
1194
+ input_ids: TFModelInputType | None = None,
1195
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1196
+ position_ids: np.ndarray | tf.Tensor | None = None,
1197
+ head_mask: np.ndarray | tf.Tensor | None = None,
1198
+ inputs_embeds: tf.Tensor | None = None,
1199
+ output_attentions: Optional[bool] = None,
1200
+ output_hidden_states: Optional[bool] = None,
1201
+ return_dict: Optional[bool] = None,
1202
+ labels: tf.Tensor | None = None,
1203
+ training: bool = False,
1204
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1205
+ r"""
1206
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1207
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1208
+ """
1209
+ outputs = self.mpnet(
1210
+ input_ids=input_ids,
1211
+ attention_mask=attention_mask,
1212
+ position_ids=position_ids,
1213
+ head_mask=head_mask,
1214
+ inputs_embeds=inputs_embeds,
1215
+ output_attentions=output_attentions,
1216
+ output_hidden_states=output_hidden_states,
1217
+ return_dict=return_dict,
1218
+ training=training,
1219
+ )
1220
+ sequence_output = outputs[0]
1221
+
1222
+ sequence_output = self.dropout(sequence_output, training=training)
1223
+ logits = self.classifier(sequence_output)
1224
+
1225
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1226
+
1227
+ if not return_dict:
1228
+ output = (logits,) + outputs[1:]
1229
+ return ((loss,) + output) if loss is not None else output
1230
+
1231
+ return TFTokenClassifierOutput(
1232
+ loss=loss,
1233
+ logits=logits,
1234
+ hidden_states=outputs.hidden_states,
1235
+ attentions=outputs.attentions,
1236
+ )
1237
+
1238
+ def build(self, input_shape=None):
1239
+ if self.built:
1240
+ return
1241
+ self.built = True
1242
+ if getattr(self, "mpnet", None) is not None:
1243
+ with tf.name_scope(self.mpnet.name):
1244
+ self.mpnet.build(None)
1245
+ if getattr(self, "classifier", None) is not None:
1246
+ with tf.name_scope(self.classifier.name):
1247
+ self.classifier.build([None, None, self.config.hidden_size])
1248
+
1249
+
1250
+ @add_start_docstrings(
1251
+ """
1252
+ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1253
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1254
+ """,
1255
+ MPNET_START_DOCSTRING,
1256
+ )
1257
+ class TFMPNetForQuestionAnswering(TFMPNetPreTrainedModel, TFQuestionAnsweringLoss):
1258
+ _keys_to_ignore_on_load_missing = [r"pooler"]
1259
+
1260
+ def __init__(self, config, *inputs, **kwargs):
1261
+ super().__init__(config, *inputs, **kwargs)
1262
+ self.num_labels = config.num_labels
1263
+
1264
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
1265
+ self.qa_outputs = keras.layers.Dense(
1266
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1267
+ )
1268
+ self.config = config
1269
+
1270
+ @unpack_inputs
1271
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1272
+ @add_code_sample_docstrings(
1273
+ checkpoint=_CHECKPOINT_FOR_DOC,
1274
+ output_type=TFQuestionAnsweringModelOutput,
1275
+ config_class=_CONFIG_FOR_DOC,
1276
+ )
1277
+ def call(
1278
+ self,
1279
+ input_ids: TFModelInputType | None = None,
1280
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
1281
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
1282
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
1283
+ inputs_embeds: tf.Tensor | None = None,
1284
+ output_attentions: Optional[bool] = None,
1285
+ output_hidden_states: Optional[bool] = None,
1286
+ return_dict: Optional[bool] = None,
1287
+ start_positions: tf.Tensor | None = None,
1288
+ end_positions: tf.Tensor | None = None,
1289
+ training: bool = False,
1290
+ **kwargs,
1291
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1292
+ r"""
1293
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1294
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1295
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1296
+ are not taken into account for computing the loss.
1297
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1298
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1299
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1300
+ are not taken into account for computing the loss.
1301
+ """
1302
+ outputs = self.mpnet(
1303
+ input_ids,
1304
+ attention_mask=attention_mask,
1305
+ position_ids=position_ids,
1306
+ head_mask=head_mask,
1307
+ inputs_embeds=inputs_embeds,
1308
+ output_attentions=output_attentions,
1309
+ output_hidden_states=output_hidden_states,
1310
+ return_dict=return_dict,
1311
+ training=training,
1312
+ )
1313
+ sequence_output = outputs[0]
1314
+
1315
+ logits = self.qa_outputs(sequence_output)
1316
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1317
+ start_logits = tf.squeeze(start_logits, axis=-1)
1318
+ end_logits = tf.squeeze(end_logits, axis=-1)
1319
+ loss = None
1320
+
1321
+ if start_positions is not None and end_positions is not None:
1322
+ labels = {"start_position": start_positions, "end_position": end_positions}
1323
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1324
+
1325
+ if not return_dict:
1326
+ output = (start_logits, end_logits) + outputs[2:]
1327
+ return ((loss,) + output) if loss is not None else output
1328
+
1329
+ return TFQuestionAnsweringModelOutput(
1330
+ loss=loss,
1331
+ start_logits=start_logits,
1332
+ end_logits=end_logits,
1333
+ hidden_states=outputs.hidden_states,
1334
+ attentions=outputs.attentions,
1335
+ )
1336
+
1337
+ def build(self, input_shape=None):
1338
+ if self.built:
1339
+ return
1340
+ self.built = True
1341
+ if getattr(self, "mpnet", None) is not None:
1342
+ with tf.name_scope(self.mpnet.name):
1343
+ self.mpnet.build(None)
1344
+ if getattr(self, "qa_outputs", None) is not None:
1345
+ with tf.name_scope(self.qa_outputs.name):
1346
+ self.qa_outputs.build([None, None, self.config.hidden_size])
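For orientation, the masked-LM head defined above can be exercised end to end through the public `transformers` API. The sketch below is illustrative only; the checkpoint name, the example sentence and the variable names are assumptions made for the example.

import tensorflow as tf
from transformers import AutoTokenizer, TFMPNetForMaskedLM

# Load the tokenizer and the TF masked-LM model from the MPNet base checkpoint.
tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
model = TFMPNetForMaskedLM.from_pretrained("microsoft/mpnet-base")

# Encode a sentence containing a single <mask> token and run a forward pass.
inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
logits = model(**inputs).logits

# Locate the masked position and take the highest-scoring vocabulary id there.
mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_index]))
print(tokenizer.decode([predicted_id]))

The same pattern applies to the sequence-classification, token-classification and question-answering heads in this file, swapping in the corresponding TFMPNetFor* class.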
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet.py ADDED
@@ -0,0 +1,546 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Tokenization classes for MPNet."""
17
+
18
+ import collections
19
+ import os
20
+ import unicodedata
21
+ from typing import List, Optional, Tuple
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/vocab.txt",
34
+ }
35
+ }
36
+
37
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
38
+ "microsoft/mpnet-base": 512,
39
+ }
40
+
41
+ PRETRAINED_INIT_CONFIGURATION = {
42
+ "microsoft/mpnet-base": {"do_lower_case": True},
43
+ }
44
+
45
+
46
+ def load_vocab(vocab_file):
47
+ """Loads a vocabulary file into a dictionary."""
48
+ vocab = collections.OrderedDict()
49
+ with open(vocab_file, "r", encoding="utf-8") as reader:
50
+ tokens = reader.readlines()
51
+ for index, token in enumerate(tokens):
52
+ token = token.rstrip("\n")
53
+ vocab[token] = index
54
+ return vocab
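+ # Illustrative sketch: a vocab.txt whose first four lines are "<s>", "<pad>", "</s>" and "[UNK]" loads as
+ # {"<s>": 0, "<pad>": 1, "</s>": 2, "[UNK]": 3}, i.e. each token is mapped to its zero-based line index.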
55
+
56
+
57
+ def whitespace_tokenize(text):
58
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
59
+ text = text.strip()
60
+ if not text:
61
+ return []
62
+ tokens = text.split()
63
+ return tokens
64
+
65
+
66
+ class MPNetTokenizer(PreTrainedTokenizer):
67
+ """
68
+
69
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
70
+ superclass for more information regarding methods.
71
+
72
+ Args:
73
+ vocab_file (`str`):
74
+ Path to the vocabulary file.
75
+ do_lower_case (`bool`, *optional*, defaults to `True`):
76
+ Whether or not to lowercase the input when tokenizing.
77
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
78
+ Whether or not to do basic tokenization before WordPiece.
79
+ never_split (`Iterable`, *optional*):
80
+ Collection of tokens which will never be split during tokenization. Only has an effect when
81
+ `do_basic_tokenize=True`
82
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
83
+ The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
84
+
85
+ <Tip>
86
+
87
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
88
+ sequence. The token used is the `cls_token`.
89
+
90
+ </Tip>
91
+
92
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
93
+ The end of sequence token.
94
+
95
+ <Tip>
96
+
97
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
98
+ The token used is the `sep_token`.
99
+
100
+ </Tip>
101
+
102
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
103
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
104
+ sequence classification or for a text and a question for question answering. It is also used as the last
105
+ token of a sequence built with special tokens.
106
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
107
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
108
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
109
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
110
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
111
+ token instead.
112
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
113
+ The token used for padding, for example when batching sequences of different lengths.
114
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
115
+ The token used for masking values. This is the token used when training this model with masked language
116
+ modeling. This is the token which the model will try to predict.
117
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
118
+ Whether or not to tokenize Chinese characters.
119
+
120
+ This should likely be deactivated for Japanese (see this
121
+ [issue](https://github.com/huggingface/transformers/issues/328)).
122
+ strip_accents (`bool`, *optional*):
123
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
124
+ value for `lowercase` (as in the original BERT).
125
+ """
126
+
127
+ vocab_files_names = VOCAB_FILES_NAMES
128
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
129
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
130
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
131
+ model_input_names = ["input_ids", "attention_mask"]
132
+
133
+ def __init__(
134
+ self,
135
+ vocab_file,
136
+ do_lower_case=True,
137
+ do_basic_tokenize=True,
138
+ never_split=None,
139
+ bos_token="<s>",
140
+ eos_token="</s>",
141
+ sep_token="</s>",
142
+ cls_token="<s>",
143
+ unk_token="[UNK]",
144
+ pad_token="<pad>",
145
+ mask_token="<mask>",
146
+ tokenize_chinese_chars=True,
147
+ strip_accents=None,
148
+ **kwargs,
149
+ ):
150
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
151
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
152
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
153
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
154
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
155
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
156
+
157
+ # The mask token behaves like a normal word, i.e. it includes the space before it
158
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
159
+
160
+ if not os.path.isfile(vocab_file):
161
+ raise ValueError(
162
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
163
+ " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
164
+ )
165
+ self.vocab = load_vocab(vocab_file)
166
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
167
+ self.do_basic_tokenize = do_basic_tokenize
168
+ if do_basic_tokenize:
169
+ self.basic_tokenizer = BasicTokenizer(
170
+ do_lower_case=do_lower_case,
171
+ never_split=never_split,
172
+ tokenize_chinese_chars=tokenize_chinese_chars,
173
+ strip_accents=strip_accents,
174
+ )
175
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
176
+
177
+ super().__init__(
178
+ do_lower_case=do_lower_case,
179
+ do_basic_tokenize=do_basic_tokenize,
180
+ never_split=never_split,
181
+ bos_token=bos_token,
182
+ eos_token=eos_token,
183
+ unk_token=unk_token,
184
+ sep_token=sep_token,
185
+ cls_token=cls_token,
186
+ pad_token=pad_token,
187
+ mask_token=mask_token,
188
+ tokenize_chinese_chars=tokenize_chinese_chars,
189
+ strip_accents=strip_accents,
190
+ **kwargs,
191
+ )
192
+
193
+ @property
194
+ def do_lower_case(self):
195
+ return self.basic_tokenizer.do_lower_case
196
+
197
+ @property
198
+ def vocab_size(self):
199
+ return len(self.vocab)
200
+
201
+ def get_vocab(self):
202
+ # "<mask>" is part of the vocab, but was added at the wrong index in the saved fast-tokenizer version
203
+ vocab = self.added_tokens_encoder.copy()
204
+ vocab.update(self.vocab)
205
+ return vocab
206
+
207
+ def _tokenize(self, text):
208
+ split_tokens = []
209
+ if self.do_basic_tokenize:
210
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
211
+ # If the token is part of the never_split set
212
+ if token in self.basic_tokenizer.never_split:
213
+ split_tokens.append(token)
214
+ else:
215
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
216
+ else:
217
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
218
+ return split_tokens
219
+
220
+ def _convert_token_to_id(self, token):
221
+ """Converts a token (str) in an id using the vocab."""
222
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
223
+
224
+ def _convert_id_to_token(self, index):
225
+ """Converts an index (integer) in a token (str) using the vocab."""
226
+ return self.ids_to_tokens.get(index, self.unk_token)
227
+
228
+ def convert_tokens_to_string(self, tokens):
229
+ """Converts a sequence of tokens (string) in a single string."""
230
+ out_string = " ".join(tokens).replace(" ##", "").strip()
231
+ return out_string
232
+
233
+ def build_inputs_with_special_tokens(
234
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
235
+ ) -> List[int]:
236
+ """
237
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
238
+ adding special tokens. A MPNet sequence has the following format:
239
+
240
+ - single sequence: `<s> X </s>`
241
+ - pair of sequences: `<s> A </s></s> B </s>`
242
+
243
+ Args:
244
+ token_ids_0 (`List[int]`):
245
+ List of IDs to which the special tokens will be added
246
+ token_ids_1 (`List[int]`, *optional*):
247
+ Optional second list of IDs for sequence pairs.
248
+
249
+ Returns:
250
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
251
+ """
252
+ if token_ids_1 is None:
253
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
254
+ cls = [self.cls_token_id]
255
+ sep = [self.sep_token_id]
256
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
257
+
258
+ def get_special_tokens_mask(
259
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
260
+ ) -> List[int]:
261
+ """
262
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
263
+ special tokens using the tokenizer `prepare_for_model` methods.
264
+
265
+ Args:
266
+ token_ids_0 (`List[int]`):
267
+ List of ids.
268
+ token_ids_1 (`List[int]`, *optional*):
269
+ Optional second list of IDs for sequence pairs.
270
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
271
+ Set to True if the token list is already formatted with special tokens for the model
272
+
273
+ Returns:
274
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
275
+ """
276
+ if already_has_special_tokens:
277
+ return super().get_special_tokens_mask(
278
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
279
+ )
280
+
281
+ if token_ids_1 is None:
282
+ return [1] + ([0] * len(token_ids_0)) + [1]
283
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
284
+
285
+ def create_token_type_ids_from_sequences(
286
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
287
+ ) -> List[int]:
288
+ """
289
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
290
+ make use of token type ids, therefore a list of zeros is returned.
291
+
292
+ Args:
293
+ token_ids_0 (`List[int]`):
294
+ List of ids.
295
+ token_ids_1 (`List[int]`, *optional*):
296
+ Optional second list of IDs for sequence pairs.
297
+
298
+ Returns:
299
+ `List[int]`: List of zeros.
300
+ """
301
+ sep = [self.sep_token_id]
302
+ cls = [self.cls_token_id]
303
+
304
+ if token_ids_1 is None:
305
+ return len(cls + token_ids_0 + sep) * [0]
306
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
307
+
308
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
309
+ index = 0
310
+ if os.path.isdir(save_directory):
311
+ vocab_file = os.path.join(
312
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
313
+ )
314
+ else:
315
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
316
+ with open(vocab_file, "w", encoding="utf-8") as writer:
317
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
318
+ if index != token_index:
319
+ logger.warning(
320
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
321
+ " Please check that the vocabulary is not corrupted!"
322
+ )
323
+ index = token_index
324
+ writer.write(token + "\n")
325
+ index += 1
326
+ return (vocab_file,)
327
+
328
+
329
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
330
+ class BasicTokenizer(object):
331
+ """
332
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
333
+
334
+ Args:
335
+ do_lower_case (`bool`, *optional*, defaults to `True`):
336
+ Whether or not to lowercase the input when tokenizing.
337
+ never_split (`Iterable`, *optional*):
338
+ Collection of tokens which will never be split during tokenization. Only has an effect when
339
+ `do_basic_tokenize=True`
340
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
341
+ Whether or not to tokenize Chinese characters.
342
+
343
+ This should likely be deactivated for Japanese (see this
344
+ [issue](https://github.com/huggingface/transformers/issues/328)).
345
+ strip_accents (`bool`, *optional*):
346
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
347
+ value for `lowercase` (as in the original BERT).
348
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
349
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
350
+ the full context of the words, such as contractions.
351
+ """
352
+
353
+ def __init__(
354
+ self,
355
+ do_lower_case=True,
356
+ never_split=None,
357
+ tokenize_chinese_chars=True,
358
+ strip_accents=None,
359
+ do_split_on_punc=True,
360
+ ):
361
+ if never_split is None:
362
+ never_split = []
363
+ self.do_lower_case = do_lower_case
364
+ self.never_split = set(never_split)
365
+ self.tokenize_chinese_chars = tokenize_chinese_chars
366
+ self.strip_accents = strip_accents
367
+ self.do_split_on_punc = do_split_on_punc
368
+
369
+ def tokenize(self, text, never_split=None):
370
+ """
371
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
372
+
373
+ Args:
374
+ never_split (`List[str]`, *optional*)
375
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
376
+ [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
377
+ """
378
+ # union() returns a new set by concatenating the two sets.
379
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
380
+ text = self._clean_text(text)
381
+
382
+ # This was added on November 1st, 2018 for the multilingual and Chinese
383
+ # models. This is also applied to the English models now, but it doesn't
384
+ # matter since the English models were not trained on any Chinese data
385
+ # and generally don't have any Chinese data in them (there are Chinese
386
+ # characters in the vocabulary because Wikipedia does have some Chinese
387
+ # words in the English Wikipedia.).
388
+ if self.tokenize_chinese_chars:
389
+ text = self._tokenize_chinese_chars(text)
390
+ # prevents treating the same character with different unicode codepoints as different characters
391
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
392
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
393
+ split_tokens = []
394
+ for token in orig_tokens:
395
+ if token not in never_split:
396
+ if self.do_lower_case:
397
+ token = token.lower()
398
+ if self.strip_accents is not False:
399
+ token = self._run_strip_accents(token)
400
+ elif self.strip_accents:
401
+ token = self._run_strip_accents(token)
402
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
403
+
404
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
405
+ return output_tokens
406
+
407
+ def _run_strip_accents(self, text):
408
+ """Strips accents from a piece of text."""
409
+ text = unicodedata.normalize("NFD", text)
410
+ output = []
411
+ for char in text:
412
+ cat = unicodedata.category(char)
413
+ if cat == "Mn":
414
+ continue
415
+ output.append(char)
416
+ return "".join(output)
417
+
418
+ def _run_split_on_punc(self, text, never_split=None):
419
+ """Splits punctuation on a piece of text."""
420
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
421
+ return [text]
422
+ chars = list(text)
423
+ i = 0
424
+ start_new_word = True
425
+ output = []
426
+ while i < len(chars):
427
+ char = chars[i]
428
+ if _is_punctuation(char):
429
+ output.append([char])
430
+ start_new_word = True
431
+ else:
432
+ if start_new_word:
433
+ output.append([])
434
+ start_new_word = False
435
+ output[-1].append(char)
436
+ i += 1
437
+
438
+ return ["".join(x) for x in output]
439
+
440
+ def _tokenize_chinese_chars(self, text):
441
+ """Adds whitespace around any CJK character."""
442
+ output = []
443
+ for char in text:
444
+ cp = ord(char)
445
+ if self._is_chinese_char(cp):
446
+ output.append(" ")
447
+ output.append(char)
448
+ output.append(" ")
449
+ else:
450
+ output.append(char)
451
+ return "".join(output)
452
+
453
+ def _is_chinese_char(self, cp):
454
+ """Checks whether CP is the codepoint of a CJK character."""
455
+ # This defines a "chinese character" as anything in the CJK Unicode block:
456
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
457
+ #
458
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
459
+ # despite its name. The modern Korean Hangul alphabet is a different block,
460
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
461
+ # space-separated words, so they are not treated specially and handled
462
+ # like the all of the other languages.
463
+ if (
464
+ (cp >= 0x4E00 and cp <= 0x9FFF)
465
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
466
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
467
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
468
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
469
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
470
+ or (cp >= 0xF900 and cp <= 0xFAFF)
471
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
472
+ ): #
473
+ return True
474
+
475
+ return False
476
+
477
+ def _clean_text(self, text):
478
+ """Performs invalid character removal and whitespace cleanup on text."""
479
+ output = []
480
+ for char in text:
481
+ cp = ord(char)
482
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
483
+ continue
484
+ if _is_whitespace(char):
485
+ output.append(" ")
486
+ else:
487
+ output.append(char)
488
+ return "".join(output)
489
+
490
+
491
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
492
+ class WordpieceTokenizer(object):
493
+ """Runs WordPiece tokenization."""
494
+
495
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
496
+ self.vocab = vocab
497
+ self.unk_token = unk_token
498
+ self.max_input_chars_per_word = max_input_chars_per_word
499
+
500
+ def tokenize(self, text):
501
+ """
502
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
503
+ tokenization using the given vocabulary.
504
+
505
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
506
+
507
+ Args:
508
+ text: A single token or whitespace separated tokens. This should have
509
+ already been passed through *BasicTokenizer*.
510
+
511
+ Returns:
512
+ A list of wordpiece tokens.
513
+ """
514
+
515
+ output_tokens = []
516
+ for token in whitespace_tokenize(text):
517
+ chars = list(token)
518
+ if len(chars) > self.max_input_chars_per_word:
519
+ output_tokens.append(self.unk_token)
520
+ continue
521
+
522
+ is_bad = False
523
+ start = 0
524
+ sub_tokens = []
525
+ while start < len(chars):
526
+ end = len(chars)
527
+ cur_substr = None
528
+ while start < end:
529
+ substr = "".join(chars[start:end])
530
+ if start > 0:
531
+ substr = "##" + substr
532
+ if substr in self.vocab:
533
+ cur_substr = substr
534
+ break
535
+ end -= 1
536
+ if cur_substr is None:
537
+ is_bad = True
538
+ break
539
+ sub_tokens.append(cur_substr)
540
+ start = end
541
+
542
+ if is_bad:
543
+ output_tokens.append(self.unk_token)
544
+ else:
545
+ output_tokens.extend(sub_tokens)
546
+ return output_tokens
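The file above defines the slow, WordPiece-based MPNet tokenizer. As a quick illustration of the special-token layout documented in `build_inputs_with_special_tokens` (`<s> X </s>` for a single sequence, `<s> A </s></s> B </s>` for a pair), here is a minimal usage sketch that is not part of the diff; it assumes `transformers` is installed and that the `microsoft/mpnet-base` vocabulary can be downloaded or is already cached.

```python
from transformers import MPNetTokenizer

tokenizer = MPNetTokenizer.from_pretrained("microsoft/mpnet-base")

# Single sequence: <s> X </s>
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
single = tokenizer.build_inputs_with_special_tokens(ids_a)

# Pair of sequences: <s> A </s></s> B </s>
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)

print(tokenizer.convert_ids_to_tokens(single))
print(tokenizer.convert_ids_to_tokens(pair))

# get_special_tokens_mask marks <s>/</s> positions with 1 and regular tokens with 0.
print(tokenizer.get_special_tokens_mask(single, already_has_special_tokens=True))
```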
env-llmeval/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet_fast.py ADDED
@@ -0,0 +1,226 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Fast Tokenization classes for MPNet."""
17
+
18
+ import json
19
+ from typing import List, Optional, Tuple
20
+
21
+ from tokenizers import normalizers
22
+
23
+ from ...tokenization_utils import AddedToken
24
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
25
+ from ...utils import logging
26
+ from .tokenization_mpnet import MPNetTokenizer
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
32
+
33
+ PRETRAINED_VOCAB_FILES_MAP = {
34
+ "vocab_file": {
35
+ "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/vocab.txt",
36
+ },
37
+ "tokenizer_file": {
38
+ "microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/tokenizer.json",
39
+ },
40
+ }
41
+
42
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
43
+ "microsoft/mpnet-base": 512,
44
+ }
45
+
46
+ PRETRAINED_INIT_CONFIGURATION = {
47
+ "microsoft/mpnet-base": {"do_lower_case": True},
48
+ }
49
+
50
+
51
+ class MPNetTokenizerFast(PreTrainedTokenizerFast):
52
+ r"""
53
+ Construct a "fast" MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
54
+
55
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
56
+ refer to this superclass for more information regarding those methods.
57
+
58
+ Args:
59
+ vocab_file (`str`):
60
+ File containing the vocabulary.
61
+ do_lower_case (`bool`, *optional*, defaults to `True`):
62
+ Whether or not to lowercase the input when tokenizing.
63
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
64
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
65
+
66
+ <Tip>
67
+
68
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
69
+ sequence. The token used is the `cls_token`.
70
+
71
+ </Tip>
72
+
73
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
74
+ The end of sequence token.
75
+
76
+ <Tip>
77
+
78
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
79
+ The token used is the `sep_token`.
80
+
81
+ </Tip>
82
+
83
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
84
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
85
+ sequence classification or for a text and a question for question answering. It is also used as the last
86
+ token of a sequence built with special tokens.
87
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
88
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
89
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
90
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
91
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
92
+ token instead.
93
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
94
+ The token used for padding, for example when batching sequences of different lengths.
95
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
96
+ The token used for masking values. This is the token used when training this model with masked language
97
+ modeling. This is the token which the model will try to predict.
98
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
100
+ issue](https://github.com/huggingface/transformers/issues/328)).
101
+ strip_accents (`bool`, *optional*):
102
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
103
+ value for `lowercase` (as in the original BERT).
104
+ """
105
+
106
+ vocab_files_names = VOCAB_FILES_NAMES
107
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
108
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
109
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
110
+ slow_tokenizer_class = MPNetTokenizer
111
+ model_input_names = ["input_ids", "attention_mask"]
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_file=None,
116
+ tokenizer_file=None,
117
+ do_lower_case=True,
118
+ bos_token="<s>",
119
+ eos_token="</s>",
120
+ sep_token="</s>",
121
+ cls_token="<s>",
122
+ unk_token="[UNK]",
123
+ pad_token="<pad>",
124
+ mask_token="<mask>",
125
+ tokenize_chinese_chars=True,
126
+ strip_accents=None,
127
+ **kwargs,
128
+ ):
129
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
130
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
131
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
132
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
133
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
134
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
135
+
136
+ # Mask token behave like a normal word, i.e. include the space before it
137
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
138
+
139
+ super().__init__(
140
+ vocab_file,
141
+ tokenizer_file=tokenizer_file,
142
+ do_lower_case=do_lower_case,
143
+ bos_token=bos_token,
144
+ eos_token=eos_token,
145
+ sep_token=sep_token,
146
+ cls_token=cls_token,
147
+ unk_token=unk_token,
148
+ pad_token=pad_token,
149
+ mask_token=mask_token,
150
+ tokenize_chinese_chars=tokenize_chinese_chars,
151
+ strip_accents=strip_accents,
152
+ **kwargs,
153
+ )
154
+
155
+ pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
156
+ if (
157
+ pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
158
+ or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
159
+ ):
160
+ pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
161
+ pre_tok_state["lowercase"] = do_lower_case
162
+ pre_tok_state["strip_accents"] = strip_accents
163
+ self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
164
+
165
+ self.do_lower_case = do_lower_case
166
+
167
+ @property
168
+ def mask_token(self) -> str:
169
+ """
170
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
171
+ having been set.
172
+
173
+ MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
174
+ comprise the space before the *<mask>*.
175
+ """
176
+ if self._mask_token is None:
177
+ if self.verbose:
178
+ logger.error("Using mask_token, but it is not set yet.")
179
+ return None
180
+ return str(self._mask_token)
181
+
182
+ @mask_token.setter
183
+ def mask_token(self, value):
184
+ """
185
+ Overriding the default behavior of the mask token to have it eat the space before it.
186
+
187
+ This is needed to preserve backward compatibility with all the previously used models based on MPNet.
188
+ """
189
+ # Mask token behave like a normal word, i.e. include the space before it
190
+ # So we set lstrip to True
191
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
192
+ self._mask_token = value
193
+
194
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
195
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
196
+ if token_ids_1 is None:
197
+ return output
198
+
199
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
200
+
201
+ def create_token_type_ids_from_sequences(
202
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
203
+ ) -> List[int]:
204
+ """
205
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
206
+ make use of token type ids, therefore a list of zeros is returned
207
+
208
+ Args:
209
+ token_ids_0 (`List[int]`):
210
+ List of ids.
211
+ token_ids_1 (`List[int]`, *optional*):
212
+ Optional second list of IDs for sequence pairs
213
+
214
+ Returns:
215
+ `List[int]`: List of zeros.
216
+ """
217
+ sep = [self.sep_token_id]
218
+ cls = [self.cls_token_id]
219
+
220
+ if token_ids_1 is None:
221
+ return len(cls + token_ids_0 + sep) * [0]
222
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
223
+
224
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
225
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
226
+ return tuple(files)
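For completeness, a hedged sketch of the fast tokenizer defined above, not part of the diff. It exercises the `<mask>` handling described in the `mask_token` property (the mask token is registered with `lstrip=True`, so it greedily absorbs the space before it) and the all-zeros token type ids; it assumes the `microsoft/mpnet-base` files are downloadable or cached.

```python
from transformers import MPNetTokenizerFast

tok = MPNetTokenizerFast.from_pretrained("microsoft/mpnet-base")

# The space before <mask> is handled by the mask token itself (lstrip=True),
# which keeps fill-mask behaviour compatible with previously released MPNet models.
enc = tok("Paris is the <mask> of France.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))

# MPNet does not use token type ids: the mask is all zeros even for a pair.
print(tok.create_token_type_ids_from_sequences([11, 12], [13, 14]))
```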
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
6
+ #
7
+ # Licensed under the Apache License, Version 2.0 (the "License");
8
+ # you may not use this file except in compliance with the License.
9
+ # You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing, software
14
+ # distributed under the License is distributed on an "AS IS" BASIS,
15
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ # See the License for the specific language governing permissions and
17
+ # limitations under the License.
18
+ from typing import TYPE_CHECKING
19
+
20
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
21
+
22
+
23
+ _import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
24
+
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
33
+
34
+
35
+ if TYPE_CHECKING:
36
+ from .configuration_timm_backbone import TimmBackboneConfig
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ from .modeling_timm_backbone import TimmBackbone
45
+
46
+ else:
47
+ import sys
48
+
49
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
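This `__init__.py` follows the library's lazy-import pattern: the module object is replaced by a `_LazyModule`, and `TimmBackbone` is only exposed when `torch` is installed. A small sketch of what that means for callers (illustrative, not part of the diff):

```python
# Importing the config never requires torch or timm.
from transformers.models.timm_backbone import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.backbone, config.out_indices)

# TimmBackbone is only registered in the lazy module when torch is available;
# otherwise the attribute is missing and the import below fails.
try:
    from transformers.models.timm_backbone import TimmBackbone  # noqa: F401
except ImportError:
    print("torch is not installed, so TimmBackbone is not exported")
```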
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (775 Bytes).
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/configuration_timm_backbone.cpython-310.pyc ADDED
Binary file (2.74 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/__pycache__/modeling_timm_backbone.cpython-310.pyc ADDED
Binary file (4.66 kB).
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/configuration_timm_backbone.py ADDED
@@ -0,0 +1,83 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """ Configuration for Backbone models"""
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class TimmBackboneConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
28
+
29
+ It is used to instantiate a timm backbone model according to the specified arguments, defining the model.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+ Args:
35
+ backbone (`str`, *optional*):
36
+ The timm checkpoint to load.
37
+ num_channels (`int`, *optional*, defaults to 3):
38
+ The number of input channels.
39
+ features_only (`bool`, *optional*, defaults to `True`):
40
+ Whether to output only the features or also the logits.
41
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
42
+ Whether to use a pretrained backbone.
43
+ out_indices (`List[int]`, *optional*):
44
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
45
+ many stages the model has). Will default to the last stage if unset.
46
+ freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
47
+ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
48
+
49
+ Example:
50
+ ```python
51
+ >>> from transformers import TimmBackboneConfig, TimmBackbone
52
+
53
+ >>> # Initializing a timm backbone
54
+ >>> configuration = TimmBackboneConfig("resnet50")
55
+
56
+ >>> # Initializing a model from the configuration
57
+ >>> model = TimmBackbone(configuration)
58
+
59
+ >>> # Accessing the model configuration
60
+ >>> configuration = model.config
61
+ ```
62
+ """
63
+
64
+ model_type = "timm_backbone"
65
+
66
+ def __init__(
67
+ self,
68
+ backbone=None,
69
+ num_channels=3,
70
+ features_only=True,
71
+ use_pretrained_backbone=True,
72
+ out_indices=None,
73
+ freeze_batch_norm_2d=False,
74
+ **kwargs,
75
+ ):
76
+ super().__init__(**kwargs)
77
+ self.backbone = backbone
78
+ self.num_channels = num_channels
79
+ self.features_only = features_only
80
+ self.use_pretrained_backbone = use_pretrained_backbone
81
+ self.use_timm_backbone = True
82
+ self.out_indices = out_indices if out_indices is not None else (-1,)
83
+ self.freeze_batch_norm_2d = freeze_batch_norm_2d
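A short sketch of the defaults set in `__init__` above (not part of the diff): when `out_indices` is left unset it falls back to `(-1,)`, i.e. only the last stage, and `use_timm_backbone` is always forced to `True` for this config class.

```python
from transformers.models.timm_backbone import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", freeze_batch_norm_2d=True)
print(config.out_indices)           # (-1,) -> last stage only when out_indices is unset
print(config.use_timm_backbone)     # True, hard-coded in __init__
print(config.freeze_batch_norm_2d)  # True, as passed above
```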
env-llmeval/lib/python3.10/site-packages/transformers/models/timm_backbone/modeling_timm_backbone.py ADDED
@@ -0,0 +1,158 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Optional, Tuple, Union
17
+
18
+ import torch
19
+
20
+ from ...modeling_outputs import BackboneOutput
21
+ from ...modeling_utils import PreTrainedModel
22
+ from ...utils import is_timm_available, is_torch_available, requires_backends
23
+ from ...utils.backbone_utils import BackboneMixin
24
+ from .configuration_timm_backbone import TimmBackboneConfig
25
+
26
+
27
+ if is_timm_available():
28
+ import timm
29
+
30
+
31
+ if is_torch_available():
32
+ from torch import Tensor
33
+
34
+
35
+ class TimmBackbone(PreTrainedModel, BackboneMixin):
36
+ """
37
+ Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the
38
+ other models in the library keeping the same API.
39
+ """
40
+
41
+ main_input_name = "pixel_values"
42
+ supports_gradient_checkpointing = False
43
+ config_class = TimmBackboneConfig
44
+
45
+ def __init__(self, config, **kwargs):
46
+ requires_backends(self, "timm")
47
+ super().__init__(config)
48
+ self.config = config
49
+
50
+ if config.backbone is None:
51
+ raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
52
+
53
+ if config.backbone not in timm.list_models():
54
+ raise ValueError(f"backbone {config.backbone} is not supported by timm.")
55
+
56
+ if hasattr(config, "out_features") and config.out_features is not None:
57
+ raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
58
+
59
+ pretrained = getattr(config, "use_pretrained_backbone", None)
60
+ if pretrained is None:
61
+ raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
62
+
63
+ # We just take the final layer by default. This matches the default for the transformers models.
64
+ out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
65
+
66
+ self._backbone = timm.create_model(
67
+ config.backbone,
68
+ pretrained=pretrained,
69
+ # This is currently not possible for transformer architectures.
70
+ features_only=config.features_only,
71
+ in_chans=config.num_channels,
72
+ out_indices=out_indices,
73
+ **kwargs,
74
+ )
75
+
76
+ # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively
77
+ if getattr(config, "freeze_batch_norm_2d", False):
78
+ self.freeze_batch_norm_2d()
79
+
80
+ # These are used to control the output of the model when called. If output_hidden_states is True, then
81
+ # return_layers is modified to include all layers.
82
+ self._return_layers = self._backbone.return_layers
83
+ self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
84
+ super()._init_backbone(config)
85
+
86
+ @classmethod
87
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
88
+ requires_backends(cls, ["vision", "timm"])
89
+ from ...models.timm_backbone import TimmBackboneConfig
90
+
91
+ config = kwargs.pop("config", TimmBackboneConfig())
92
+
93
+ use_timm = kwargs.pop("use_timm_backbone", True)
94
+ if not use_timm:
95
+ raise ValueError("use_timm_backbone must be True for timm backbones")
96
+
97
+ num_channels = kwargs.pop("num_channels", config.num_channels)
98
+ features_only = kwargs.pop("features_only", config.features_only)
99
+ use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
100
+ out_indices = kwargs.pop("out_indices", config.out_indices)
101
+ config = TimmBackboneConfig(
102
+ backbone=pretrained_model_name_or_path,
103
+ num_channels=num_channels,
104
+ features_only=features_only,
105
+ use_pretrained_backbone=use_pretrained_backbone,
106
+ out_indices=out_indices,
107
+ )
108
+ return super()._from_config(config, **kwargs)
109
+
110
+ def freeze_batch_norm_2d(self):
111
+ timm.layers.freeze_batch_norm_2d(self._backbone)
112
+
113
+ def unfreeze_batch_norm_2d(self):
114
+ timm.layers.unfreeze_batch_norm_2d(self._backbone)
115
+
116
+ def _init_weights(self, module):
117
+ """
118
+ Empty init weights function to ensure compatibility of the class in the library.
119
+ """
120
+ pass
121
+
122
+ def forward(
123
+ self,
124
+ pixel_values: torch.FloatTensor,
125
+ output_attentions: Optional[bool] = None,
126
+ output_hidden_states: Optional[bool] = None,
127
+ return_dict: Optional[bool] = None,
128
+ **kwargs,
129
+ ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
130
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
131
+ output_hidden_states = (
132
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
133
+ )
134
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
135
+
136
+ if output_attentions:
137
+ raise ValueError("Cannot output attentions for timm backbones at the moment")
138
+
139
+ if output_hidden_states:
140
+ # We modify the return layers to include all the stages of the backbone
141
+ self._backbone.return_layers = self._all_layers
142
+ hidden_states = self._backbone(pixel_values, **kwargs)
143
+ self._backbone.return_layers = self._return_layers
144
+ feature_maps = tuple(hidden_states[i] for i in self.out_indices)
145
+ else:
146
+ feature_maps = self._backbone(pixel_values, **kwargs)
147
+ hidden_states = None
148
+
149
+ feature_maps = tuple(feature_maps)
150
+ hidden_states = tuple(hidden_states) if hidden_states is not None else None
151
+
152
+ if not return_dict:
153
+ output = (feature_maps,)
154
+ if output_hidden_states:
155
+ output = output + (hidden_states,)
156
+ return output
157
+
158
+ return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
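To show how the wrapper above is meant to be used, here is a hedged end-to-end sketch, not part of the diff. It assumes `torch`, `timm` and `Pillow` are installed; `"resnet18"` is just an illustrative timm model name, and `use_pretrained_backbone=False` avoids downloading weights.

```python
import torch

from transformers import TimmBackbone

backbone = TimmBackbone.from_pretrained(
    "resnet18",
    use_pretrained_backbone=False,
    out_indices=(1, 2, 3, 4),
)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)

# BackboneOutput.feature_maps holds one tensor per requested stage.
for feature_map in outputs.feature_maps:
    print(feature_map.shape)
```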
env-llmeval/lib/python3.10/site-packages/transformers/models/videomae/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_videomae"] = [
30
+ "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "VideoMAEForPreTraining",
32
+ "VideoMAEModel",
33
+ "VideoMAEPreTrainedModel",
34
+ "VideoMAEForVideoClassification",
35
+ ]
36
+
37
+ try:
38
+ if not is_vision_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"]
44
+ _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"]
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .modeling_videomae import (
56
+ VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST,
57
+ VideoMAEForPreTraining,
58
+ VideoMAEForVideoClassification,
59
+ VideoMAEModel,
60
+ VideoMAEPreTrainedModel,
61
+ )
62
+
63
+ try:
64
+ if not is_vision_available():
65
+ raise OptionalDependencyNotAvailable()
66
+ except OptionalDependencyNotAvailable:
67
+ pass
68
+ else:
69
+ from .feature_extraction_videomae import VideoMAEFeatureExtractor
70
+ from .image_processing_videomae import VideoMAEImageProcessor
71
+
72
+ else:
73
+ import sys
74
+
75
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)