applied-ai-018 committed
Commit fa55437 · verified · 1 parent: 5722a9e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py +29 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py +980 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__init__.py +147 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/__init__.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/configuration_big_bird.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/convert_bigbird_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_big_bird.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_flax_big_bird.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird_fast.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/configuration_big_bird.py +175 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py +70 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/modeling_big_bird.py +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/modeling_flax_big_bird.py +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird.py +322 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird_fast.py +230 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__init__.py +71 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/configuration_bigbird_pegasus.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/convert_bigbird_pegasus_tf_to_pytorch.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +412 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py +170 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/image_processing_bridgetower.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__init__.py +88 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/feature_extraction_mobilenet_v2.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/modeling_mobilenet_v2.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +154 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py +178 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py +33 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +373 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +862 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py +71 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py +123 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py +1737 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__init__.py +82 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import _LazyModule
18
+
19
+
20
+ _import_structure = {"tokenization_bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"]}
21
+
22
+
23
+ if TYPE_CHECKING:
24
+ from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
25
+
26
+ else:
27
+ import sys
28
+
29
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
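A minimal usage sketch of the lazy-module wiring above, assuming `transformers` with a MeCab backend (`fugashi` + `ipadic`) is installed and that a `BertJapaneseTokenizer` checkpoint such as `cl-tohoku/bert-base-japanese` (an illustrative choice, not part of this commit) is reachable:

```python
# Minimal sketch, assuming fugashi/ipadic and access to the
# cl-tohoku/bert-base-japanese checkpoint (illustrative choice).
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
# MeCab word tokenization followed by WordPiece subword tokenization
print(tokenizer.tokenize("吾輩は猫である。"))
```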
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (583 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc ADDED
Binary file (29.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py ADDED
@@ -0,0 +1,980 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+
17
+
18
+ import collections
19
+ import copy
20
+ import os
21
+ import unicodedata
22
+ from typing import Any, Dict, List, Optional, Tuple
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
25
+ from ...utils import is_sentencepiece_available, is_sudachi_projection_available, logging
26
+
27
+
28
+ if is_sentencepiece_available():
29
+ import sentencepiece as spm
30
+ else:
31
+ spm = None
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "spm_file": "spiece.model"}
36
+
37
+ SPIECE_UNDERLINE = "▁"
38
+
39
+
40
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
41
+ def load_vocab(vocab_file):
42
+ """Loads a vocabulary file into a dictionary."""
43
+ vocab = collections.OrderedDict()
44
+ with open(vocab_file, "r", encoding="utf-8") as reader:
45
+ tokens = reader.readlines()
46
+ for index, token in enumerate(tokens):
47
+ token = token.rstrip("\n")
48
+ vocab[token] = index
49
+ return vocab
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
53
+ def whitespace_tokenize(text):
54
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
55
+ text = text.strip()
56
+ if not text:
57
+ return []
58
+ tokens = text.split()
59
+ return tokens
60
+
61
+
62
+ class BertJapaneseTokenizer(PreTrainedTokenizer):
63
+ r"""
64
+ Construct a BERT tokenizer for Japanese text.
65
+
66
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
67
+ to this superclass for more information regarding those methods.
68
+
69
+ Args:
70
+ vocab_file (`str`):
71
+ Path to a one-wordpiece-per-line vocabulary file.
72
+ spm_file (`str`, *optional*):
73
+ Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm or .model
74
+ extension) that contains the vocabulary.
75
+ do_lower_case (`bool`, *optional*, defaults to `False`):
76
+ Whether to lower case the input. Only has an effect when do_basic_tokenize=True.
77
+ do_word_tokenize (`bool`, *optional*, defaults to `True`):
78
+ Whether to do word tokenization.
79
+ do_subword_tokenize (`bool`, *optional*, defaults to `True`):
80
+ Whether to do subword tokenization.
81
+ word_tokenizer_type (`str`, *optional*, defaults to `"basic"`):
82
+ Type of word tokenizer. Choose from ["basic", "mecab", "sudachi", "jumanpp"].
83
+ subword_tokenizer_type (`str`, *optional*, defaults to `"wordpiece"`):
84
+ Type of subword tokenizer. Choose from ["wordpiece", "character", "sentencepiece"].
85
+ mecab_kwargs (`dict`, *optional*):
86
+ Dictionary passed to the `MecabTokenizer` constructor.
87
+ sudachi_kwargs (`dict`, *optional*):
88
+ Dictionary passed to the `SudachiTokenizer` constructor.
89
+ jumanpp_kwargs (`dict`, *optional*):
90
+ Dictionary passed to the `JumanppTokenizer` constructor.
91
+ """
92
+
93
+ vocab_files_names = VOCAB_FILES_NAMES
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_file,
98
+ spm_file=None,
99
+ do_lower_case=False,
100
+ do_word_tokenize=True,
101
+ do_subword_tokenize=True,
102
+ word_tokenizer_type="basic",
103
+ subword_tokenizer_type="wordpiece",
104
+ never_split=None,
105
+ unk_token="[UNK]",
106
+ sep_token="[SEP]",
107
+ pad_token="[PAD]",
108
+ cls_token="[CLS]",
109
+ mask_token="[MASK]",
110
+ mecab_kwargs=None,
111
+ sudachi_kwargs=None,
112
+ jumanpp_kwargs=None,
113
+ **kwargs,
114
+ ):
115
+ if subword_tokenizer_type == "sentencepiece":
116
+ if not os.path.isfile(spm_file):
117
+ raise ValueError(
118
+ f"Can't find a vocabulary file at path '{spm_file}'. To load the vocabulary from a Google"
119
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
120
+ )
121
+ self.spm_file = spm_file
122
+ else:
123
+ if not os.path.isfile(vocab_file):
124
+ raise ValueError(
125
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
126
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
127
+ )
128
+ self.vocab = load_vocab(vocab_file)
129
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
130
+
131
+ self.do_word_tokenize = do_word_tokenize
132
+ self.word_tokenizer_type = word_tokenizer_type
133
+ self.lower_case = do_lower_case
134
+ self.never_split = never_split
135
+ self.mecab_kwargs = copy.deepcopy(mecab_kwargs)
136
+ self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs)
137
+ self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs)
138
+ if do_word_tokenize:
139
+ if word_tokenizer_type == "basic":
140
+ self.word_tokenizer = BasicTokenizer(
141
+ do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False
142
+ )
143
+ elif word_tokenizer_type == "mecab":
144
+ self.word_tokenizer = MecabTokenizer(
145
+ do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {})
146
+ )
147
+ elif word_tokenizer_type == "sudachi":
148
+ self.word_tokenizer = SudachiTokenizer(
149
+ do_lower_case=do_lower_case, never_split=never_split, **(sudachi_kwargs or {})
150
+ )
151
+ elif word_tokenizer_type == "jumanpp":
152
+ self.word_tokenizer = JumanppTokenizer(
153
+ do_lower_case=do_lower_case, never_split=never_split, **(jumanpp_kwargs or {})
154
+ )
155
+ else:
156
+ raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.")
157
+
158
+ self.do_subword_tokenize = do_subword_tokenize
159
+ self.subword_tokenizer_type = subword_tokenizer_type
160
+ if do_subword_tokenize:
161
+ if subword_tokenizer_type == "wordpiece":
162
+ self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
163
+ elif subword_tokenizer_type == "character":
164
+ self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=str(unk_token))
165
+ elif subword_tokenizer_type == "sentencepiece":
166
+ self.subword_tokenizer = SentencepieceTokenizer(vocab=self.spm_file, unk_token=str(unk_token))
167
+ else:
168
+ raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.")
169
+ super().__init__(
170
+ spm_file=spm_file,
171
+ unk_token=unk_token,
172
+ sep_token=sep_token,
173
+ pad_token=pad_token,
174
+ cls_token=cls_token,
175
+ mask_token=mask_token,
176
+ do_lower_case=do_lower_case,
177
+ do_word_tokenize=do_word_tokenize,
178
+ do_subword_tokenize=do_subword_tokenize,
179
+ word_tokenizer_type=word_tokenizer_type,
180
+ subword_tokenizer_type=subword_tokenizer_type,
181
+ never_split=never_split,
182
+ mecab_kwargs=mecab_kwargs,
183
+ sudachi_kwargs=sudachi_kwargs,
184
+ jumanpp_kwargs=jumanpp_kwargs,
185
+ **kwargs,
186
+ )
187
+
188
+ @property
189
+ def do_lower_case(self):
190
+ return self.lower_case
191
+
192
+ def __getstate__(self):
193
+ state = dict(self.__dict__)
194
+ if self.word_tokenizer_type in ["mecab", "sudachi", "jumanpp"]:
195
+ del state["word_tokenizer"]
196
+ return state
197
+
198
+ def __setstate__(self, state):
199
+ self.__dict__ = state
200
+ if self.word_tokenizer_type == "mecab":
201
+ self.word_tokenizer = MecabTokenizer(
202
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {})
203
+ )
204
+ elif self.word_tokenizer_type == "sudachi":
205
+ self.word_tokenizer = SudachiTokenizer(
206
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.sudachi_kwargs or {})
207
+ )
208
+ elif self.word_tokenizer_type == "jumanpp":
209
+ self.word_tokenizer = JumanppTokenizer(
210
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.jumanpp_kwargs or {})
211
+ )
212
+
213
+ def _tokenize(self, text):
214
+ if self.do_word_tokenize:
215
+ tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
216
+ else:
217
+ tokens = [text]
218
+
219
+ if self.do_subword_tokenize:
220
+ split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
221
+ else:
222
+ split_tokens = tokens
223
+
224
+ return split_tokens
225
+
226
+ @property
227
+ def vocab_size(self):
228
+ if self.subword_tokenizer_type == "sentencepiece":
229
+ return len(self.subword_tokenizer.sp_model)
230
+ return len(self.vocab)
231
+
232
+ def get_vocab(self):
233
+ if self.subword_tokenizer_type == "sentencepiece":
234
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
235
+ vocab.update(self.added_tokens_encoder)
236
+ return vocab
237
+ return dict(self.vocab, **self.added_tokens_encoder)
238
+
239
+ def _convert_token_to_id(self, token):
240
+ """Converts a token (str) in an id using the vocab."""
241
+ if self.subword_tokenizer_type == "sentencepiece":
242
+ return self.subword_tokenizer.sp_model.PieceToId(token)
243
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
244
+
245
+ def _convert_id_to_token(self, index):
246
+ """Converts an index (integer) in a token (str) using the vocab."""
247
+ if self.subword_tokenizer_type == "sentencepiece":
248
+ return self.subword_tokenizer.sp_model.IdToPiece(index)
249
+ return self.ids_to_tokens.get(index, self.unk_token)
250
+
251
+ def convert_tokens_to_string(self, tokens):
252
+ """Converts a sequence of tokens (string) in a single string."""
253
+ if self.subword_tokenizer_type == "sentencepiece":
254
+ return self.subword_tokenizer.sp_model.decode(tokens)
255
+ out_string = " ".join(tokens).replace(" ##", "").strip()
256
+ return out_string
257
+
258
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
259
+ def build_inputs_with_special_tokens(
260
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
261
+ ) -> List[int]:
262
+ """
263
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
264
+ adding special tokens. A BERT sequence has the following format:
265
+
266
+ - single sequence: `[CLS] X [SEP]`
267
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
268
+
269
+ Args:
270
+ token_ids_0 (`List[int]`):
271
+ List of IDs to which the special tokens will be added.
272
+ token_ids_1 (`List[int]`, *optional*):
273
+ Optional second list of IDs for sequence pairs.
274
+
275
+ Returns:
276
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
277
+ """
278
+ if token_ids_1 is None:
279
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
280
+ cls = [self.cls_token_id]
281
+ sep = [self.sep_token_id]
282
+ return cls + token_ids_0 + sep + token_ids_1 + sep
283
+
284
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
285
+ def get_special_tokens_mask(
286
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
287
+ ) -> List[int]:
288
+ """
289
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
290
+ special tokens using the tokenizer `prepare_for_model` method.
291
+
292
+ Args:
293
+ token_ids_0 (`List[int]`):
294
+ List of IDs.
295
+ token_ids_1 (`List[int]`, *optional*):
296
+ Optional second list of IDs for sequence pairs.
297
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
298
+ Whether or not the token list is already formatted with special tokens for the model.
299
+
300
+ Returns:
301
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
302
+ """
303
+
304
+ if already_has_special_tokens:
305
+ return super().get_special_tokens_mask(
306
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
307
+ )
308
+
309
+ if token_ids_1 is not None:
310
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
311
+ return [1] + ([0] * len(token_ids_0)) + [1]
312
+
313
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
314
+ def create_token_type_ids_from_sequences(
315
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
316
+ ) -> List[int]:
317
+ """
318
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
319
+ pair mask has the following format:
320
+
321
+ ```
322
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
323
+ | first sequence | second sequence |
324
+ ```
325
+
326
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
327
+
328
+ Args:
329
+ token_ids_0 (`List[int]`):
330
+ List of IDs.
331
+ token_ids_1 (`List[int]`, *optional*):
332
+ Optional second list of IDs for sequence pairs.
333
+
334
+ Returns:
335
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
336
+ """
337
+ sep = [self.sep_token_id]
338
+ cls = [self.cls_token_id]
339
+ if token_ids_1 is None:
340
+ return len(cls + token_ids_0 + sep) * [0]
341
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
342
+
343
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
344
+ if os.path.isdir(save_directory):
345
+ if self.subword_tokenizer_type == "sentencepiece":
346
+ vocab_file = os.path.join(
347
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["spm_file"]
348
+ )
349
+ else:
350
+ vocab_file = os.path.join(
351
+ save_directory,
352
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
353
+ )
354
+ else:
355
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
356
+
357
+ if self.subword_tokenizer_type == "sentencepiece":
358
+ with open(vocab_file, "wb") as writer:
359
+ content_spiece_model = self.subword_tokenizer.sp_model.serialized_model_proto()
360
+ writer.write(content_spiece_model)
361
+ else:
362
+ with open(vocab_file, "w", encoding="utf-8") as writer:
363
+ index = 0
364
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
365
+ if index != token_index:
366
+ logger.warning(
367
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
368
+ " Please check that the vocabulary is not corrupted!"
369
+ )
370
+ index = token_index
371
+ writer.write(token + "\n")
372
+ index += 1
373
+ return (vocab_file,)
374
+
375
+
376
+ class MecabTokenizer:
377
+ """Runs basic tokenization with MeCab morphological parser."""
378
+
379
+ def __init__(
380
+ self,
381
+ do_lower_case=False,
382
+ never_split=None,
383
+ normalize_text=True,
384
+ mecab_dic: Optional[str] = "ipadic",
385
+ mecab_option: Optional[str] = None,
386
+ ):
387
+ """
388
+ Constructs a MecabTokenizer.
389
+
390
+ Args:
391
+ **do_lower_case**: (*optional*) boolean (default True)
392
+ Whether to lowercase the input.
393
+ **never_split**: (*optional*) list of str
394
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
395
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
396
+ **normalize_text**: (*optional*) boolean (default True)
397
+ Whether to apply unicode normalization to text before tokenization.
398
+ **mecab_dic**: (*optional*) string (default "ipadic")
399
+ Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary,
400
+ set this option to `None` and modify *mecab_option*.
401
+ **mecab_option**: (*optional*) string
402
+ String passed to MeCab constructor.
403
+ """
404
+ self.do_lower_case = do_lower_case
405
+ self.never_split = never_split if never_split is not None else []
406
+ self.normalize_text = normalize_text
407
+
408
+ try:
409
+ import fugashi
410
+ except ModuleNotFoundError as error:
411
+ raise error.__class__(
412
+ "You need to install fugashi to use MecabTokenizer. "
413
+ "See https://pypi.org/project/fugashi/ for installation."
414
+ )
415
+
416
+ mecab_option = mecab_option or ""
417
+
418
+ if mecab_dic is not None:
419
+ if mecab_dic == "ipadic":
420
+ try:
421
+ import ipadic
422
+ except ModuleNotFoundError as error:
423
+ raise error.__class__(
424
+ "The ipadic dictionary is not installed. "
425
+ "See https://github.com/polm/ipadic-py for installation."
426
+ )
427
+
428
+ dic_dir = ipadic.DICDIR
429
+
430
+ elif mecab_dic == "unidic_lite":
431
+ try:
432
+ import unidic_lite
433
+ except ModuleNotFoundError as error:
434
+ raise error.__class__(
435
+ "The unidic_lite dictionary is not installed. "
436
+ "See https://github.com/polm/unidic-lite for installation."
437
+ )
438
+
439
+ dic_dir = unidic_lite.DICDIR
440
+
441
+ elif mecab_dic == "unidic":
442
+ try:
443
+ import unidic
444
+ except ModuleNotFoundError as error:
445
+ raise error.__class__(
446
+ "The unidic dictionary is not installed. "
447
+ "See https://github.com/polm/unidic-py for installation."
448
+ )
449
+
450
+ dic_dir = unidic.DICDIR
451
+ if not os.path.isdir(dic_dir):
452
+ raise RuntimeError(
453
+ "The unidic dictionary itself is not found. "
454
+ "See https://github.com/polm/unidic-py for installation."
455
+ )
456
+
457
+ else:
458
+ raise ValueError("Invalid mecab_dic is specified.")
459
+
460
+ mecabrc = os.path.join(dic_dir, "mecabrc")
461
+ mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option
462
+
463
+ self.mecab = fugashi.GenericTagger(mecab_option)
464
+
465
+ def tokenize(self, text, never_split=None, **kwargs):
466
+ """Tokenizes a piece of text."""
467
+ if self.normalize_text:
468
+ text = unicodedata.normalize("NFKC", text)
469
+
470
+ never_split = self.never_split + (never_split if never_split is not None else [])
471
+ tokens = []
472
+
473
+ for word in self.mecab(text):
474
+ token = word.surface
475
+
476
+ if self.do_lower_case and token not in never_split:
477
+ token = token.lower()
478
+
479
+ tokens.append(token)
480
+
481
+ return tokens
482
+
483
+
484
+ class SudachiTokenizer:
485
+ """Runs basic tokenization with Sudachi morphological parser."""
486
+
487
+ def __init__(
488
+ self,
489
+ do_lower_case=False,
490
+ never_split=None,
491
+ normalize_text=True,
492
+ trim_whitespace=False,
493
+ sudachi_split_mode="A",
494
+ sudachi_config_path=None,
495
+ sudachi_resource_dir=None,
496
+ sudachi_dict_type="core",
497
+ sudachi_projection=None,
498
+ ):
499
+ """
500
+ Constructs a SudachiTokenizer.
501
+
502
+ Args:
503
+ **do_lower_case**: (*optional*) boolean (default True)
504
+ Whether to lowercase the input.
505
+ **never_split**: (*optional*) list of str
506
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
507
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
508
+ **normalize_text**: (*optional*) boolean (default True)
509
+ Whether to apply unicode normalization to text before tokenization.
510
+ **trim_whitespace**: (*optional*) boolean (default False)
511
+ Whether to trim all whitespace, tab, newline from tokens.
512
+ **sudachi_split_mode**: (*optional*) string
513
+ Split mode of sudachi, choose from `["A", "B", "C"]`.
514
+ **sudachi_config_path**: (*optional*) string
515
+ **sudachi_resource_dir**: (*optional*) string
516
+ **sudachi_dict_type**: (*optional*) string
517
+ dict type of sudachi, choose from `["small", "core", "full"]`.
518
+ **sudachi_projection**: (*optional*) string
519
+ Word projection mode of sudachi, choose from `["surface", "normalized", "reading", "dictionary", "dictionary_and_surface", "normalized_and_surface", "normalized_nouns"]`.
520
+ """
521
+
522
+ self.do_lower_case = do_lower_case
523
+ self.never_split = never_split if never_split is not None else []
524
+ self.normalize_text = normalize_text
525
+ self.trim_whitespace = trim_whitespace
526
+
527
+ try:
528
+ from sudachipy import dictionary, tokenizer
529
+ except ImportError:
530
+ raise ImportError(
531
+ "You need to install sudachipy to use SudachiTokenizer. "
532
+ "See https://github.com/WorksApplications/SudachiPy for installation."
533
+ )
534
+
535
+ if sudachi_split_mode == "A":
536
+ self.split_mode = tokenizer.Tokenizer.SplitMode.A
537
+ elif sudachi_split_mode == "B":
538
+ self.split_mode = tokenizer.Tokenizer.SplitMode.B
539
+ elif sudachi_split_mode == "C":
540
+ self.split_mode = tokenizer.Tokenizer.SplitMode.C
541
+ else:
542
+ raise ValueError("Invalid sudachi_split_mode is specified.")
543
+
544
+ self.projection = sudachi_projection
545
+
546
+ sudachi_dictionary = dictionary.Dictionary(
547
+ config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type
548
+ )
549
+ if is_sudachi_projection_available():
550
+ self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection)
551
+ elif self.projection is not None:
552
+ raise ImportError("You need to install sudachipy>=0.6.8 to specify `projection` field in sudachi_kwargs.")
553
+ else:
554
+ self.sudachi = sudachi_dictionary.create(self.split_mode)
555
+
556
+ def tokenize(self, text, never_split=None, **kwargs):
557
+ """Tokenizes a piece of text."""
558
+ if self.normalize_text:
559
+ text = unicodedata.normalize("NFKC", text)
560
+
561
+ never_split = self.never_split + (never_split if never_split is not None else [])
562
+ tokens = []
563
+
564
+ for word in self.sudachi.tokenize(text):
565
+ token = word.surface()
566
+
567
+ if self.do_lower_case and token not in never_split:
568
+ token = token.lower()
569
+
570
+ if self.trim_whitespace:
571
+ if token.strip() == "":
572
+ continue
573
+ else:
574
+ token = token.strip()
575
+
576
+ tokens.append(token)
577
+
578
+ return tokens
579
+
580
+
581
+ class JumanppTokenizer:
582
+ """Runs basic tokenization with jumanpp morphological parser."""
583
+
584
+ def __init__(
585
+ self,
586
+ do_lower_case=False,
587
+ never_split=None,
588
+ normalize_text=True,
589
+ trim_whitespace=False,
590
+ ):
591
+ """
592
+ Constructs a JumanppTokenizer.
593
+
594
+ Args:
595
+ **do_lower_case**: (*optional*) boolean (default True)
596
+ Whether to lowercase the input.
597
+ **never_split**: (*optional*) list of str
598
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
599
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
600
+ **normalize_text**: (*optional*) boolean (default True)
601
+ Whether to apply unicode normalization to text before tokenization.
602
+ **trim_whitespace**: (*optional*) boolean (default False)
603
+ Whether to trim all whitespace, tab, newline from tokens.
604
+ """
605
+
606
+ self.do_lower_case = do_lower_case
607
+ self.never_split = never_split if never_split is not None else []
608
+ self.normalize_text = normalize_text
609
+ self.trim_whitespace = trim_whitespace
610
+
611
+ try:
612
+ import rhoknp
613
+ except ImportError:
614
+ raise ImportError(
615
+ "You need to install rhoknp to use JumanppTokenizer. "
616
+ "See https://github.com/ku-nlp/rhoknp for installation."
617
+ )
618
+
619
+ self.juman = rhoknp.Jumanpp()
620
+
621
+ def tokenize(self, text, never_split=None, **kwargs):
622
+ """Tokenizes a piece of text."""
623
+ if self.normalize_text:
624
+ text = unicodedata.normalize("NFKC", text)
625
+
626
+ text = text.strip()
627
+
628
+ never_split = self.never_split + (never_split if never_split is not None else [])
629
+ tokens = []
630
+
631
+ for mrph in self.juman.apply_to_sentence(text).morphemes:
632
+ token = mrph.text
633
+
634
+ if self.do_lower_case and token not in never_split:
635
+ token = token.lower()
636
+
637
+ if self.trim_whitespace:
638
+ if token.strip() == "":
639
+ continue
640
+ else:
641
+ token = token.strip()
642
+
643
+ tokens.append(token)
644
+
645
+ return tokens
646
+
647
+
648
+ class CharacterTokenizer:
649
+ """Runs Character tokenization."""
650
+
651
+ def __init__(self, vocab, unk_token, normalize_text=True):
652
+ """
653
+ Constructs a CharacterTokenizer.
654
+
655
+ Args:
656
+ **vocab**:
657
+ Vocabulary object.
658
+ **unk_token**: str
659
+ A special symbol for out-of-vocabulary token.
660
+ **normalize_text**: (`optional`) boolean (default True)
661
+ Whether to apply unicode normalization to text before tokenization.
662
+ """
663
+ self.vocab = vocab
664
+ self.unk_token = unk_token
665
+ self.normalize_text = normalize_text
666
+
667
+ def tokenize(self, text):
668
+ """
669
+ Tokenizes a piece of text into characters.
670
+
671
+ For example, `input = "apple"` will return as output `["a", "p", "p", "l", "e"]`.
672
+
673
+ Args:
674
+ text: A single token or whitespace separated tokens.
675
+ This should have already been passed through *BasicTokenizer*.
676
+
677
+ Returns:
678
+ A list of characters.
679
+ """
680
+ if self.normalize_text:
681
+ text = unicodedata.normalize("NFKC", text)
682
+
683
+ output_tokens = []
684
+ for char in text:
685
+ if char not in self.vocab:
686
+ output_tokens.append(self.unk_token)
687
+ continue
688
+
689
+ output_tokens.append(char)
690
+
691
+ return output_tokens
692
+
693
+
694
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
695
+ class BasicTokenizer(object):
696
+ """
697
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
698
+
699
+ Args:
700
+ do_lower_case (`bool`, *optional*, defaults to `True`):
701
+ Whether or not to lowercase the input when tokenizing.
702
+ never_split (`Iterable`, *optional*):
703
+ Collection of tokens which will never be split during tokenization. Only has an effect when
704
+ `do_basic_tokenize=True`
705
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
706
+ Whether or not to tokenize Chinese characters.
707
+
708
+ This should likely be deactivated for Japanese (see this
709
+ [issue](https://github.com/huggingface/transformers/issues/328)).
710
+ strip_accents (`bool`, *optional*):
711
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
712
+ value for `lowercase` (as in the original BERT).
713
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
714
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
715
+ the full context of the words, such as contractions.
716
+ """
717
+
718
+ def __init__(
719
+ self,
720
+ do_lower_case=True,
721
+ never_split=None,
722
+ tokenize_chinese_chars=True,
723
+ strip_accents=None,
724
+ do_split_on_punc=True,
725
+ ):
726
+ if never_split is None:
727
+ never_split = []
728
+ self.do_lower_case = do_lower_case
729
+ self.never_split = set(never_split)
730
+ self.tokenize_chinese_chars = tokenize_chinese_chars
731
+ self.strip_accents = strip_accents
732
+ self.do_split_on_punc = do_split_on_punc
733
+
734
+ def tokenize(self, text, never_split=None):
735
+ """
736
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
737
+
738
+ Args:
739
+ never_split (`List[str]`, *optional*):
740
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
741
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
742
+ """
743
+ # union() returns a new set by concatenating the two sets.
744
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
745
+ text = self._clean_text(text)
746
+
747
+ # This was added on November 1st, 2018 for the multilingual and Chinese
748
+ # models. This is also applied to the English models now, but it doesn't
749
+ # matter since the English models were not trained on any Chinese data
750
+ # and generally don't have any Chinese data in them (there are Chinese
751
+ # characters in the vocabulary because Wikipedia does have some Chinese
752
+ # words in the English Wikipedia.).
753
+ if self.tokenize_chinese_chars:
754
+ text = self._tokenize_chinese_chars(text)
755
+ # prevents treating the same character with different unicode codepoints as different characters
756
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
757
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
758
+ split_tokens = []
759
+ for token in orig_tokens:
760
+ if token not in never_split:
761
+ if self.do_lower_case:
762
+ token = token.lower()
763
+ if self.strip_accents is not False:
764
+ token = self._run_strip_accents(token)
765
+ elif self.strip_accents:
766
+ token = self._run_strip_accents(token)
767
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
768
+
769
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
770
+ return output_tokens
771
+
772
+ def _run_strip_accents(self, text):
773
+ """Strips accents from a piece of text."""
774
+ text = unicodedata.normalize("NFD", text)
775
+ output = []
776
+ for char in text:
777
+ cat = unicodedata.category(char)
778
+ if cat == "Mn":
779
+ continue
780
+ output.append(char)
781
+ return "".join(output)
782
+
783
+ def _run_split_on_punc(self, text, never_split=None):
784
+ """Splits punctuation on a piece of text."""
785
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
786
+ return [text]
787
+ chars = list(text)
788
+ i = 0
789
+ start_new_word = True
790
+ output = []
791
+ while i < len(chars):
792
+ char = chars[i]
793
+ if _is_punctuation(char):
794
+ output.append([char])
795
+ start_new_word = True
796
+ else:
797
+ if start_new_word:
798
+ output.append([])
799
+ start_new_word = False
800
+ output[-1].append(char)
801
+ i += 1
802
+
803
+ return ["".join(x) for x in output]
804
+
805
+ def _tokenize_chinese_chars(self, text):
806
+ """Adds whitespace around any CJK character."""
807
+ output = []
808
+ for char in text:
809
+ cp = ord(char)
810
+ if self._is_chinese_char(cp):
811
+ output.append(" ")
812
+ output.append(char)
813
+ output.append(" ")
814
+ else:
815
+ output.append(char)
816
+ return "".join(output)
817
+
818
+ def _is_chinese_char(self, cp):
819
+ """Checks whether CP is the codepoint of a CJK character."""
820
+ # This defines a "chinese character" as anything in the CJK Unicode block:
821
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
822
+ #
823
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
824
+ # despite its name. The modern Korean Hangul alphabet is a different block,
825
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
826
+ # space-separated words, so they are not treated specially and handled
827
+ # like all of the other languages.
828
+ if (
829
+ (cp >= 0x4E00 and cp <= 0x9FFF)
830
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
831
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
832
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
833
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
834
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
835
+ or (cp >= 0xF900 and cp <= 0xFAFF)
836
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
837
+ ): #
838
+ return True
839
+
840
+ return False
841
+
842
+ def _clean_text(self, text):
843
+ """Performs invalid character removal and whitespace cleanup on text."""
844
+ output = []
845
+ for char in text:
846
+ cp = ord(char)
847
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
848
+ continue
849
+ if _is_whitespace(char):
850
+ output.append(" ")
851
+ else:
852
+ output.append(char)
853
+ return "".join(output)
854
+
855
+
856
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
857
+ class WordpieceTokenizer(object):
858
+ """Runs WordPiece tokenization."""
859
+
860
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
861
+ self.vocab = vocab
862
+ self.unk_token = unk_token
863
+ self.max_input_chars_per_word = max_input_chars_per_word
864
+
865
+ def tokenize(self, text):
866
+ """
867
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
868
+ tokenization using the given vocabulary.
869
+
870
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
871
+
872
+ Args:
873
+ text: A single token or whitespace separated tokens. This should have
874
+ already been passed through *BasicTokenizer*.
875
+
876
+ Returns:
877
+ A list of wordpiece tokens.
878
+ """
879
+
880
+ output_tokens = []
881
+ for token in whitespace_tokenize(text):
882
+ chars = list(token)
883
+ if len(chars) > self.max_input_chars_per_word:
884
+ output_tokens.append(self.unk_token)
885
+ continue
886
+
887
+ is_bad = False
888
+ start = 0
889
+ sub_tokens = []
890
+ while start < len(chars):
891
+ end = len(chars)
892
+ cur_substr = None
893
+ while start < end:
894
+ substr = "".join(chars[start:end])
895
+ if start > 0:
896
+ substr = "##" + substr
897
+ if substr in self.vocab:
898
+ cur_substr = substr
899
+ break
900
+ end -= 1
901
+ if cur_substr is None:
902
+ is_bad = True
903
+ break
904
+ sub_tokens.append(cur_substr)
905
+ start = end
906
+
907
+ if is_bad:
908
+ output_tokens.append(self.unk_token)
909
+ else:
910
+ output_tokens.extend(sub_tokens)
911
+ return output_tokens
912
+
913
+
914
+ class SentencepieceTokenizer(object):
915
+ """
916
+ Runs sentencepiece tokenization. Based on transformers.models.albert.tokenization_albert.AlbertTokenizer.
917
+ """
918
+
919
+ def __init__(
920
+ self,
921
+ vocab,
922
+ unk_token,
923
+ do_lower_case=False,
924
+ remove_space=True,
925
+ keep_accents=True,
926
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
927
+ ):
928
+ self.vocab = vocab
929
+ self.unk_token = unk_token
930
+ self.do_lower_case = do_lower_case
931
+ self.remove_space = remove_space
932
+ self.keep_accents = keep_accents
933
+
934
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
935
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
936
+ self.sp_model.Load(self.vocab)
937
+
938
+ def preprocess_text(self, inputs):
939
+ if self.remove_space:
940
+ outputs = " ".join(inputs.strip().split())
941
+ else:
942
+ outputs = inputs
943
+ outputs = outputs.replace("``", '"').replace("''", '"')
944
+
945
+ if not self.keep_accents:
946
+ outputs = unicodedata.normalize("NFKD", outputs)
947
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
948
+ if self.do_lower_case:
949
+ outputs = outputs.lower()
950
+
951
+ return outputs
952
+
953
+ def tokenize(self, text):
954
+ """
955
+ Tokenizes text by sentencepiece. Based on [SentencePiece](https://github.com/google/sentencepiece).
956
+ Tokenization needs the given vocabulary.
957
+
958
+ Args:
959
+ text: A string to be tokenized.
960
+
961
+ Returns:
962
+ A list of sentencepiece tokens.
963
+ """
964
+ text = self.preprocess_text(text)
965
+ pieces = self.sp_model.encode(text, out_type=str)
966
+ new_pieces = []
967
+ for piece in pieces:
968
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
969
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
970
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
971
+ if len(cur_pieces[0]) == 1:
972
+ cur_pieces = cur_pieces[1:]
973
+ else:
974
+ cur_pieces[0] = cur_pieces[0][1:]
975
+ cur_pieces.append(piece[-1])
976
+ new_pieces.extend(cur_pieces)
977
+ else:
978
+ new_pieces.append(piece)
979
+
980
+ return new_pieces
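As a quick check of the greedy longest-match-first behaviour documented in `WordpieceTokenizer.tokenize` above, a self-contained sketch with a made-up toy vocabulary (only an installed `transformers` is assumed):

```python
# Sketch with a toy vocabulary; the vocab contents are invented for illustration.
from transformers.models.bert_japanese.tokenization_bert_japanese import WordpieceTokenizer

toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))     # ['un', '##aff', '##able']
print(wordpiece.tokenize("unbelievable"))  # ['[UNK]'] -- no complete greedy match in the toy vocab
```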
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__init__.py ADDED
@@ -0,0 +1,147 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig", "BigBirdOnnxConfig"],
29
+ }
30
+
31
+ try:
32
+ if not is_sentencepiece_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_big_bird"] = ["BigBirdTokenizer"]
38
+
39
+ try:
40
+ if not is_tokenizers_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["tokenization_big_bird_fast"] = ["BigBirdTokenizerFast"]
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["modeling_big_bird"] = [
54
+ "BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST",
55
+ "BigBirdForCausalLM",
56
+ "BigBirdForMaskedLM",
57
+ "BigBirdForMultipleChoice",
58
+ "BigBirdForPreTraining",
59
+ "BigBirdForQuestionAnswering",
60
+ "BigBirdForSequenceClassification",
61
+ "BigBirdForTokenClassification",
62
+ "BigBirdLayer",
63
+ "BigBirdModel",
64
+ "BigBirdPreTrainedModel",
65
+ "load_tf_weights_in_big_bird",
66
+ ]
67
+
68
+ try:
69
+ if not is_flax_available():
70
+ raise OptionalDependencyNotAvailable()
71
+ except OptionalDependencyNotAvailable:
72
+ pass
73
+ else:
74
+ _import_structure["modeling_flax_big_bird"] = [
75
+ "FlaxBigBirdForCausalLM",
76
+ "FlaxBigBirdForMaskedLM",
77
+ "FlaxBigBirdForMultipleChoice",
78
+ "FlaxBigBirdForPreTraining",
79
+ "FlaxBigBirdForQuestionAnswering",
80
+ "FlaxBigBirdForSequenceClassification",
81
+ "FlaxBigBirdForTokenClassification",
82
+ "FlaxBigBirdModel",
83
+ "FlaxBigBirdPreTrainedModel",
84
+ ]
85
+
86
+ if TYPE_CHECKING:
87
+ from .configuration_big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig, BigBirdOnnxConfig
88
+
89
+ try:
90
+ if not is_sentencepiece_available():
91
+ raise OptionalDependencyNotAvailable()
92
+ except OptionalDependencyNotAvailable:
93
+ pass
94
+ else:
95
+ from .tokenization_big_bird import BigBirdTokenizer
96
+
97
+ try:
98
+ if not is_tokenizers_available():
99
+ raise OptionalDependencyNotAvailable()
100
+ except OptionalDependencyNotAvailable:
101
+ pass
102
+ else:
103
+ from .tokenization_big_bird_fast import BigBirdTokenizerFast
104
+
105
+ try:
106
+ if not is_torch_available():
107
+ raise OptionalDependencyNotAvailable()
108
+ except OptionalDependencyNotAvailable:
109
+ pass
110
+ else:
111
+ from .modeling_big_bird import (
112
+ BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST,
113
+ BigBirdForCausalLM,
114
+ BigBirdForMaskedLM,
115
+ BigBirdForMultipleChoice,
116
+ BigBirdForPreTraining,
117
+ BigBirdForQuestionAnswering,
118
+ BigBirdForSequenceClassification,
119
+ BigBirdForTokenClassification,
120
+ BigBirdLayer,
121
+ BigBirdModel,
122
+ BigBirdPreTrainedModel,
123
+ load_tf_weights_in_big_bird,
124
+ )
125
+
126
+ try:
127
+ if not is_flax_available():
128
+ raise OptionalDependencyNotAvailable()
129
+ except OptionalDependencyNotAvailable:
130
+ pass
131
+ else:
132
+ from .modeling_flax_big_bird import (
133
+ FlaxBigBirdForCausalLM,
134
+ FlaxBigBirdForMaskedLM,
135
+ FlaxBigBirdForMultipleChoice,
136
+ FlaxBigBirdForPreTraining,
137
+ FlaxBigBirdForQuestionAnswering,
138
+ FlaxBigBirdForSequenceClassification,
139
+ FlaxBigBirdForTokenClassification,
140
+ FlaxBigBirdModel,
141
+ FlaxBigBirdPreTrainedModel,
142
+ )
143
+
144
+ else:
145
+ import sys
146
+
147
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
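A short sketch of what the optional-dependency guards above amount to in practice (assuming only that `transformers` is installed; the model branch is exercised only when PyTorch is present):

```python
# Sketch: the configuration has no optional dependency, while the model classes
# registered above are only functional when is_torch_available() is True.
from transformers import BigBirdConfig
from transformers.utils import is_torch_available

config = BigBirdConfig(num_hidden_layers=2, attention_type="original_full")

if is_torch_available():
    from transformers import BigBirdModel

    model = BigBirdModel(config)  # randomly initialised weights
    print(type(model).__name__, model.config.attention_type)
else:
    print("torch not installed; only the configuration class is usable here")
```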
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.22 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/configuration_big_bird.cpython-310.pyc ADDED
Binary file (7.05 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/convert_bigbird_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.64 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_big_bird.cpython-310.pyc ADDED
Binary file (83.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_flax_big_bird.cpython-310.pyc ADDED
Binary file (63.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird_fast.cpython-310.pyc ADDED
Binary file (8.95 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/configuration_big_bird.py ADDED
@@ -0,0 +1,175 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BigBird model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class BigBirdConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`BigBirdModel`]. It is used to instantiate a
33
+ BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration
34
+ with the defaults will yield a similar configuration to that of the BigBird
35
+ [google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 50358):
43
+ Vocabulary size of the BigBird model. Defines the number of different tokens that can be represented by the
44
+ `input_ids` passed when calling [`BigBirdModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimension of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
53
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 1024 or 2048 or 4096).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed when calling [`BigBirdModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+ The epsilon used by the layer normalization layers.
69
+ is_decoder (`bool`, *optional*, defaults to `False`):
70
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
73
+ relevant if `config.is_decoder=True`.
74
+ attention_type (`str`, *optional*, defaults to `"block_sparse"`):
75
+ Whether to use block sparse attention (with linear complexity) as introduced in the paper, or the original full
76
+ attention layer (with quadratic complexity). Possible values are `"original_full"` and `"block_sparse"`.
77
+ use_bias (`bool`, *optional*, defaults to `True`):
78
+ Whether to use bias in query, key, value.
79
+ rescale_embeddings (`bool`, *optional*, defaults to `False`):
80
+ Whether to rescale embeddings with (hidden_size ** 0.5).
81
+ block_size (`int`, *optional*, defaults to 64):
82
+ Size of each block. Useful only when `attention_type == "block_sparse"`.
83
+ num_random_blocks (`int`, *optional*, defaults to 3):
84
+ Each query will attend to this many random blocks. Useful only when `attention_type ==
85
+ "block_sparse"`.
86
+ classifier_dropout (`float`, *optional*):
87
+ The dropout ratio for the classification head.
88
+
89
+ Example:
90
+
91
+ ```python
92
+ >>> from transformers import BigBirdConfig, BigBirdModel
93
+
94
+ >>> # Initializing a BigBird google/bigbird-roberta-base style configuration
95
+ >>> configuration = BigBirdConfig()
96
+
97
+ >>> # Initializing a model (with random weights) from the google/bigbird-roberta-base style configuration
98
+ >>> model = BigBirdModel(configuration)
99
+
100
+ >>> # Accessing the model configuration
101
+ >>> configuration = model.config
102
+ ```"""
103
+
104
+ model_type = "big_bird"
105
+
106
+ def __init__(
107
+ self,
108
+ vocab_size=50358,
109
+ hidden_size=768,
110
+ num_hidden_layers=12,
111
+ num_attention_heads=12,
112
+ intermediate_size=3072,
113
+ hidden_act="gelu_new",
114
+ hidden_dropout_prob=0.1,
115
+ attention_probs_dropout_prob=0.1,
116
+ max_position_embeddings=4096,
117
+ type_vocab_size=2,
118
+ initializer_range=0.02,
119
+ layer_norm_eps=1e-12,
120
+ use_cache=True,
121
+ pad_token_id=0,
122
+ bos_token_id=1,
123
+ eos_token_id=2,
124
+ sep_token_id=66,
125
+ attention_type="block_sparse",
126
+ use_bias=True,
127
+ rescale_embeddings=False,
128
+ block_size=64,
129
+ num_random_blocks=3,
130
+ classifier_dropout=None,
131
+ **kwargs,
132
+ ):
133
+ super().__init__(
134
+ pad_token_id=pad_token_id,
135
+ bos_token_id=bos_token_id,
136
+ eos_token_id=eos_token_id,
137
+ sep_token_id=sep_token_id,
138
+ **kwargs,
139
+ )
140
+
141
+ self.vocab_size = vocab_size
142
+ self.max_position_embeddings = max_position_embeddings
143
+ self.hidden_size = hidden_size
144
+ self.num_hidden_layers = num_hidden_layers
145
+ self.num_attention_heads = num_attention_heads
146
+ self.intermediate_size = intermediate_size
147
+ self.hidden_act = hidden_act
148
+ self.hidden_dropout_prob = hidden_dropout_prob
149
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
150
+ self.initializer_range = initializer_range
151
+ self.type_vocab_size = type_vocab_size
152
+ self.layer_norm_eps = layer_norm_eps
153
+ self.use_cache = use_cache
154
+
155
+ self.rescale_embeddings = rescale_embeddings
156
+ self.attention_type = attention_type
157
+ self.use_bias = use_bias
158
+ self.block_size = block_size
159
+ self.num_random_blocks = num_random_blocks
160
+ self.classifier_dropout = classifier_dropout
161
+
162
+
163
+ class BigBirdOnnxConfig(OnnxConfig):
164
+ @property
165
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
166
+ if self.task == "multiple-choice":
167
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
168
+ else:
169
+ dynamic_axis = {0: "batch", 1: "sequence"}
170
+ return OrderedDict(
171
+ [
172
+ ("input_ids", dynamic_axis),
173
+ ("attention_mask", dynamic_axis),
174
+ ]
175
+ )
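The three settings that separate BigBird from a vanilla BERT-style encoder are `attention_type`, `block_size`, and `num_random_blocks` above. A minimal sketch of how they might be used, assuming a working `transformers` installation; the values shown are simply the defaults from the class:

```python
from transformers import BigBirdConfig

# Default configuration: block-sparse attention over 64-token blocks, 3 random blocks per query.
config = BigBirdConfig()
assert config.attention_type == "block_sparse"
assert (config.block_size, config.num_random_blocks) == (64, 3)

# For short inputs, the docstring's alternative is to fall back to full quadratic attention.
short_config = BigBirdConfig(attention_type="original_full", max_position_embeddings=512)
```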
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,70 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BigBird checkpoint."""
16
+
17
+
18
+ import argparse
19
+
20
+ from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
21
+ from transformers.utils import logging
22
+
23
+
24
+ logging.set_verbosity_info()
25
+
26
+
27
+ def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
28
+ # Initialise PyTorch model
29
+ config = BigBirdConfig.from_json_file(big_bird_config_file)
30
+ print(f"Building PyTorch model from configuration: {config}")
31
+
32
+ if is_trivia_qa:
33
+ model = BigBirdForQuestionAnswering(config)
34
+ else:
35
+ model = BigBirdForPreTraining(config)
36
+
37
+ # Load weights from tf checkpoint
38
+ load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
39
+
40
+ # Save pytorch-model
41
+ print(f"Save PyTorch model to {pytorch_dump_path}")
42
+ model.save_pretrained(pytorch_dump_path)
43
+
44
+
45
+ if __name__ == "__main__":
46
+ parser = argparse.ArgumentParser()
47
+ # Required parameters
48
+ parser.add_argument(
49
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
50
+ )
51
+ parser.add_argument(
52
+ "--big_bird_config_file",
53
+ default=None,
54
+ type=str,
55
+ required=True,
56
+ help=(
57
+ "The config json file corresponding to the pre-trained BERT model. \n"
58
+ "This specifies the model architecture."
59
+ ),
60
+ )
61
+ parser.add_argument(
62
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
63
+ )
64
+ parser.add_argument(
65
+ "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
66
+ )
67
+ args = parser.parse_args()
68
+ convert_tf_checkpoint_to_pytorch(
69
+ args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
70
+ )
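The conversion script above is a plain argparse CLI, so it can also be driven from Python. The paths below are placeholders, not files that ship with this environment; running it additionally requires TensorFlow to read the checkpoint:

```python
# Hypothetical paths; substitute a real TF checkpoint, its config JSON, and an output directory.
from transformers.models.big_bird.convert_bigbird_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/bigbird_tf_checkpoint",
    big_bird_config_file="/path/to/big_bird_config.json",
    pytorch_dump_path="/path/to/output_dir",
    is_trivia_qa=False,  # set True when converting a TriviaQA question-answering checkpoint
)
```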
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/modeling_big_bird.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/modeling_flax_big_bird.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird.py ADDED
@@ -0,0 +1,322 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for BigBird."""
16
+
17
+
18
+ import os
19
+ import re
20
+ from shutil import copyfile
21
+ from typing import Any, Dict, List, Optional, Tuple
22
+
23
+ import sentencepiece as spm
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
32
+
33
+
34
+ class BigBirdTokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
37
+
38
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
39
+ this superclass for more information regarding those methods.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
44
+ contains the vocabulary necessary to instantiate a tokenizer.
45
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
46
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
47
+ token instead.
48
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
49
+ The beginning of sequence token.
50
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
51
+ The end of sequence token.
52
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
53
+ The token used for padding, for example when batching sequences of different lengths.
54
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
55
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
56
+ sequence classification or for a text and a question for question answering. It is also used as the last
57
+ token of a sequence built with special tokens.
58
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
59
+ The token used for masking values. This is the token used when training this model with masked language
60
+ modeling. This is the token which the model will try to predict.
61
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
62
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
63
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
64
+ sp_model_kwargs (`dict`, *optional*):
65
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
66
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
67
+ to set:
68
+
69
+ - `enable_sampling`: Enable subword regularization.
70
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
71
+
72
+ - `nbest_size = {0,1}`: No sampling is performed.
73
+ - `nbest_size > 1`: samples from the nbest_size results.
74
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
75
+ using forward-filtering-and-backward-sampling algorithm.
76
+
77
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
78
+ BPE-dropout.
79
+ """
80
+
81
+ vocab_files_names = VOCAB_FILES_NAMES
82
+ model_input_names = ["input_ids", "attention_mask"]
83
+ prefix_tokens: List[int] = []
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_file,
88
+ unk_token="<unk>",
89
+ bos_token="<s>",
90
+ eos_token="</s>",
91
+ pad_token="<pad>",
92
+ sep_token="[SEP]",
93
+ mask_token="[MASK]",
94
+ cls_token="[CLS]",
95
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
96
+ **kwargs,
97
+ ) -> None:
98
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
99
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
100
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
101
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
102
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
103
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
104
+
105
+ # Mask token behaves like a normal word, i.e. it includes the space before it
106
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
107
+
108
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
109
+
110
+ self.vocab_file = vocab_file
111
+
112
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
113
+ self.sp_model.Load(vocab_file)
114
+
115
+ super().__init__(
116
+ bos_token=bos_token,
117
+ eos_token=eos_token,
118
+ unk_token=unk_token,
119
+ pad_token=pad_token,
120
+ sep_token=sep_token,
121
+ mask_token=mask_token,
122
+ cls_token=cls_token,
123
+ sp_model_kwargs=self.sp_model_kwargs,
124
+ **kwargs,
125
+ )
126
+
127
+ @property
128
+ def vocab_size(self):
129
+ return self.sp_model.get_piece_size()
130
+
131
+ def get_vocab(self):
132
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
133
+ vocab.update(self.added_tokens_encoder)
134
+ return vocab
135
+
136
+ def __getstate__(self):
137
+ state = self.__dict__.copy()
138
+ state["sp_model"] = None
139
+ return state
140
+
141
+ def __setstate__(self, d):
142
+ self.__dict__ = d
143
+
144
+ # for backward compatibility
145
+ if not hasattr(self, "sp_model_kwargs"):
146
+ self.sp_model_kwargs = {}
147
+
148
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
149
+ self.sp_model.Load(self.vocab_file)
150
+
151
+ def _tokenize(self, text: str) -> List[str]:
152
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
153
+ return self.sp_model.encode(text, out_type=str)
154
+
155
+ def _convert_token_to_id(self, token):
156
+ """Converts a token (str) in an id using the vocab."""
157
+ return self.sp_model.piece_to_id(token)
158
+
159
+ def _convert_id_to_token(self, index):
160
+ """Converts an index (integer) in a token (str) using the vocab."""
161
+ token = self.sp_model.IdToPiece(index)
162
+ return token
163
+
164
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
165
+ def convert_tokens_to_string(self, tokens):
166
+ """Converts a sequence of tokens (string) in a single string."""
167
+ current_sub_tokens = []
168
+ out_string = ""
169
+ prev_is_special = False
170
+ for token in tokens:
171
+ # make sure that special tokens are not decoded using sentencepiece model
172
+ if token in self.all_special_tokens:
173
+ if not prev_is_special:
174
+ out_string += " "
175
+ out_string += self.sp_model.decode(current_sub_tokens) + token
176
+ prev_is_special = True
177
+ current_sub_tokens = []
178
+ else:
179
+ current_sub_tokens.append(token)
180
+ prev_is_special = False
181
+ out_string += self.sp_model.decode(current_sub_tokens)
182
+ return out_string.strip()
183
+
184
+ def _decode(
185
+ self,
186
+ token_ids: List[int],
187
+ skip_special_tokens: bool = False,
188
+ clean_up_tokenization_spaces: bool = None,
189
+ spaces_between_special_tokens: bool = True,
190
+ **kwargs,
191
+ ) -> str:
192
+ self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
193
+
194
+ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
195
+
196
+ # To avoid mixing byte-level and unicode for byte-level BPE
197
+ # we need to build string separately for added tokens and byte-level tokens
198
+ # cf. https://github.com/huggingface/transformers/issues/1133
199
+ sub_texts = []
200
+ current_sub_text = []
201
+ for token in filtered_tokens:
202
+ if skip_special_tokens and token in self.all_special_ids:
203
+ continue
204
+ if token in self.added_tokens_encoder:
205
+ if current_sub_text:
206
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
207
+ current_sub_text = []
208
+ sub_texts.append(token)
209
+ else:
210
+ current_sub_text.append(token)
211
+ if current_sub_text:
212
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
213
+
214
+ # Mimic the behavior of the Rust tokenizer:
215
+ # No space before [MASK] and [SEP]
216
+ if spaces_between_special_tokens:
217
+ text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
218
+ else:
219
+ text = "".join(sub_texts)
220
+
221
+ clean_up_tokenization_spaces = (
222
+ clean_up_tokenization_spaces
223
+ if clean_up_tokenization_spaces is not None
224
+ else self.clean_up_tokenization_spaces
225
+ )
226
+ if clean_up_tokenization_spaces:
227
+ clean_text = self.clean_up_tokenization(text)
228
+ return clean_text
229
+ else:
230
+ return text
231
+
232
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
233
+ if not os.path.isdir(save_directory):
234
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
235
+ return
236
+ out_vocab_file = os.path.join(
237
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
238
+ )
239
+
240
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
241
+ copyfile(self.vocab_file, out_vocab_file)
242
+ elif not os.path.isfile(self.vocab_file):
243
+ with open(out_vocab_file, "wb") as fi:
244
+ content_spiece_model = self.sp_model.serialized_model_proto()
245
+ fi.write(content_spiece_model)
246
+
247
+ return (out_vocab_file,)
248
+
249
+ def build_inputs_with_special_tokens(
250
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
251
+ ) -> List[int]:
252
+ """
253
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
254
+ adding special tokens. A Big Bird sequence has the following format:
255
+
256
+ - single sequence: `[CLS] X [SEP]`
257
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
258
+
259
+ Args:
260
+ token_ids_0 (`List[int]`):
261
+ List of IDs to which the special tokens will be added.
262
+ token_ids_1 (`List[int]`, *optional*):
263
+ Optional second list of IDs for sequence pairs.
264
+
265
+ Returns:
266
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
267
+ """
268
+ if token_ids_1 is None:
269
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
270
+ cls = [self.cls_token_id]
271
+ sep = [self.sep_token_id]
272
+ return cls + token_ids_0 + sep + token_ids_1 + sep
273
+
274
+ def get_special_tokens_mask(
275
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
276
+ ) -> List[int]:
277
+ """
278
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
279
+ special tokens using the tokenizer `prepare_for_model` method.
280
+
281
+ Args:
282
+ token_ids_0 (`List[int]`):
283
+ List of IDs.
284
+ token_ids_1 (`List[int]`, *optional*):
285
+ Optional second list of IDs for sequence pairs.
286
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
287
+ Whether or not the token list is already formatted with special tokens for the model.
288
+
289
+ Returns:
290
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
291
+ """
292
+ if already_has_special_tokens:
293
+ return super().get_special_tokens_mask(
294
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
295
+ )
296
+
297
+ if token_ids_1 is None:
298
+ return [1] + ([0] * len(token_ids_0)) + [1]
299
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
300
+
301
+ def create_token_type_ids_from_sequences(
302
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
303
+ ) -> List[int]:
304
+ """
305
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BigBird
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
308
+
309
+ Args:
310
+ token_ids_0 (`List[int]`):
311
+ List of IDs.
312
+ token_ids_1 (`List[int]`, *optional*):
313
+ Optional second list of IDs for sequence pairs.
314
+
315
+ Returns:
316
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
317
+ """
318
+ sep = [self.sep_token_id]
319
+ cls = [self.cls_token_id]
320
+ if token_ids_1 is None:
321
+ return len(cls + token_ids_0 + sep) * [0]
322
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
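The special-token layout documented in `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` can be checked without a SentencePiece model. A standalone toy version with placeholder IDs (real IDs come from the vocabulary file):

```python
# Placeholder IDs standing in for cls_token_id / sep_token_id.
CLS_ID, SEP_ID = 65, 66

def build_inputs(ids_a, ids_b=None):
    # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
    if ids_b is None:
        return [CLS_ID] + ids_a + [SEP_ID]
    return [CLS_ID] + ids_a + [SEP_ID] + ids_b + [SEP_ID]

def token_type_ids(ids_a, ids_b=None):
    # 0s cover [CLS] A [SEP]; 1s cover B [SEP] when a second sequence is present.
    first_segment = [0] * (len(ids_a) + 2)
    if ids_b is None:
        return first_segment
    return first_segment + [1] * (len(ids_b) + 1)

assert build_inputs([10, 11, 12], [20, 21]) == [65, 10, 11, 12, 66, 20, 21, 66]
assert token_type_ids([10, 11, 12], [20, 21]) == [0, 0, 0, 0, 0, 1, 1, 1]
```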
llmeval-env/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird_fast.py ADDED
@@ -0,0 +1,230 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for Big Bird model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_big_bird import BigBirdTokenizer
29
+ else:
30
+ BigBirdTokenizer = None
31
+
32
+ logger = logging.get_logger(__name__)
33
+ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
34
+
35
+
36
+ SPIECE_UNDERLINE = "▁"
37
+
38
+
39
+ class BigBirdTokenizerFast(PreTrainedTokenizerFast):
40
+ """
41
+ Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library). Based on
42
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
43
+ tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
44
+ this superclass for more information regarding those methods.
45
+
46
+ Args:
47
+ vocab_file (`str`):
48
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
49
+ contains the vocabulary necessary to instantiate a tokenizer.
50
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
51
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
52
+
53
+ <Tip>
54
+
55
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
56
+ sequence. The token used is the `cls_token`.
57
+
58
+ </Tip>
59
+
60
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
61
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of
+ sequence. The token used is the `sep_token`.
+
+ </Tip>
+
63
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
64
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
65
+ token instead.
66
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
67
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
68
+ sequence classification or for a text and a question for question answering. It is also used as the last
69
+ token of a sequence built with special tokens.
70
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
71
+ The token used for padding, for example when batching sequences of different lengths.
72
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
73
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
74
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
75
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
76
+ The token used for masking values. This is the token used when training this model with masked language
77
+ modeling. This is the token which the model will try to predict.
78
+ """
79
+
80
+ vocab_files_names = VOCAB_FILES_NAMES
81
+ slow_tokenizer_class = BigBirdTokenizer
82
+ model_input_names = ["input_ids", "attention_mask"]
83
+ prefix_tokens: List[int] = []
84
+
85
+ def __init__(
86
+ self,
87
+ vocab_file=None,
88
+ tokenizer_file=None,
89
+ unk_token="<unk>",
90
+ bos_token="<s>",
91
+ eos_token="</s>",
92
+ pad_token="<pad>",
93
+ sep_token="[SEP]",
94
+ mask_token="[MASK]",
95
+ cls_token="[CLS]",
96
+ **kwargs,
97
+ ):
98
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
99
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
100
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
101
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
102
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
103
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
104
+
105
+ # Mask token behaves like a normal word, i.e. it includes the space before it
106
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
107
+
108
+ super().__init__(
109
+ vocab_file,
110
+ tokenizer_file=tokenizer_file,
111
+ bos_token=bos_token,
112
+ eos_token=eos_token,
113
+ unk_token=unk_token,
114
+ sep_token=sep_token,
115
+ pad_token=pad_token,
116
+ cls_token=cls_token,
117
+ mask_token=mask_token,
118
+ **kwargs,
119
+ )
120
+
121
+ self.vocab_file = vocab_file
122
+
123
+ @property
124
+ def can_save_slow_tokenizer(self) -> bool:
125
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
126
+
127
+ def build_inputs_with_special_tokens(
128
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
129
+ ) -> List[int]:
130
+ """
131
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
132
+ adding special tokens. A BigBird sequence has the following format:
133
+
134
+ - single sequence: `[CLS] X [SEP]`
135
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
136
+
137
+ Args:
138
+ token_ids_0 (`List[int]`):
139
+ List of IDs to which the special tokens will be added
140
+ token_ids_1 (`List[int]`, *optional*):
141
+ Optional second list of IDs for sequence pairs.
142
+
143
+ Returns:
144
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
145
+ """
146
+ sep = [self.sep_token_id]
147
+ cls = [self.cls_token_id]
148
+ if token_ids_1 is None:
149
+ return cls + token_ids_0 + sep
150
+ return cls + token_ids_0 + sep + token_ids_1 + sep
151
+
152
+ def get_special_tokens_mask(
153
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
154
+ ) -> List[int]:
155
+ """
156
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
157
+ special tokens using the tokenizer `prepare_for_model` method.
158
+
159
+ Args:
160
+ token_ids_0 (`List[int]`):
161
+ List of ids.
162
+ token_ids_1 (`List[int]`, *optional*):
163
+ Optional second list of IDs for sequence pairs.
164
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
165
+ Set to True if the token list is already formatted with special tokens for the model
166
+
167
+ Returns:
168
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
169
+ """
170
+
171
+ if already_has_special_tokens:
172
+ if token_ids_1 is not None:
173
+ raise ValueError(
174
+ "You should not supply a second sequence if the provided sequence of "
175
+ "ids is already formatted with special tokens for the model."
176
+ )
177
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
178
+
179
+ if token_ids_1 is None:
180
+ return [1] + ([0] * len(token_ids_0)) + [1]
181
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
182
+
183
+ def create_token_type_ids_from_sequences(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A BigBird
188
+ sequence pair mask has the following format:
189
+
190
+ ```
191
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
192
+ | first sequence | second sequence |
193
+ ```
194
+
195
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
196
+
197
+ Args:
198
+ token_ids_0 (`List[int]`):
199
+ List of ids.
200
+ token_ids_1 (`List[int]`, *optional*):
201
+ Optional second list of IDs for sequence pairs.
202
+
203
+ Returns:
204
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
205
+ """
206
+ sep = [self.sep_token_id]
207
+ cls = [self.cls_token_id]
208
+
209
+ if token_ids_1 is None:
210
+ return len(cls + token_ids_0 + sep) * [0]
211
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
212
+
213
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
214
+ if not self.can_save_slow_tokenizer:
215
+ raise ValueError(
216
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
217
+ "tokenizer."
218
+ )
219
+
220
+ if not os.path.isdir(save_directory):
221
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
222
+ return
223
+ out_vocab_file = os.path.join(
224
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
225
+ )
226
+
227
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
228
+ copyfile(self.vocab_file, out_vocab_file)
229
+
230
+ return (out_vocab_file,)
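`get_special_tokens_mask` above marks only the inserted `[CLS]`/`[SEP]` positions with 1. A toy check of that shape, independent of any tokenizer instance:

```python
def special_tokens_mask(len_a, len_b=None):
    # Mirrors the branch for sequences that do not yet contain special tokens.
    if len_b is None:
        return [1] + [0] * len_a + [1]
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]

assert special_tokens_mask(3) == [1, 0, 0, 0, 1]              # [CLS] x x x [SEP]
assert special_tokens_mask(3, 2) == [1, 0, 0, 0, 1, 0, 0, 1]  # [CLS] A [SEP] B [SEP]
```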
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_bigbird_pegasus": [
21
+ "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "BigBirdPegasusConfig",
23
+ "BigBirdPegasusOnnxConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_bigbird_pegasus"] = [
34
+ "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "BigBirdPegasusForCausalLM",
36
+ "BigBirdPegasusForConditionalGeneration",
37
+ "BigBirdPegasusForQuestionAnswering",
38
+ "BigBirdPegasusForSequenceClassification",
39
+ "BigBirdPegasusModel",
40
+ "BigBirdPegasusPreTrainedModel",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_bigbird_pegasus import (
46
+ BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ BigBirdPegasusConfig,
48
+ BigBirdPegasusOnnxConfig,
49
+ )
50
+
51
+ try:
52
+ if not is_torch_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .modeling_bigbird_pegasus import (
58
+ BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
59
+ BigBirdPegasusForCausalLM,
60
+ BigBirdPegasusForConditionalGeneration,
61
+ BigBirdPegasusForQuestionAnswering,
62
+ BigBirdPegasusForSequenceClassification,
63
+ BigBirdPegasusModel,
64
+ BigBirdPegasusPreTrainedModel,
65
+ )
66
+
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
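The `_import_structure` / `_LazyModule` arrangement above keeps `import transformers.models.bigbird_pegasus` cheap: the torch-backed submodules are only imported when one of their classes is first accessed. A rough sketch of the idea (not the real `_LazyModule`, whose implementation lives in `transformers.utils`):

```python
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Illustrative stand-in: resolve exported names to their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The heavy import only happens here, the first time the attribute is requested.
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)
```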
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/configuration_bigbird_pegasus.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/convert_bigbird_pegasus_tf_to_pytorch.cpython-310.pyc ADDED
Binary file (5.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py ADDED
@@ -0,0 +1,412 @@
1
+ # coding=utf-8
2
+ # Copyright Google Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BigBirdPegasus model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
23
+ from ...onnx.utils import compute_effective_axis_dimension
24
+ from ...utils import TensorType, is_torch_available, logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class BigBirdPegasusConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`BigBirdPegasusModel`]. It is used to instantiate
36
+ a BigBirdPegasus model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the BigBirdPegasus
38
+ [google/bigbird-pegasus-large-arxiv](https://huggingface.co/google/bigbird-pegasus-large-arxiv) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+
44
+ Args:
45
+ vocab_size (`int`, *optional*, defaults to 96103):
46
+ Vocabulary size of the BigBirdPegasus model. Defines the number of different tokens that can be represented
47
+ by the `inputs_ids` passed when calling [`BigBirdPegasusModel`].
48
+ d_model (`int`, *optional*, defaults to 1024):
49
+ Dimension of the layers and the pooler layer.
50
+ encoder_layers (`int`, *optional*, defaults to 16):
51
+ Number of encoder layers.
52
+ decoder_layers (`int`, *optional*, defaults to 16):
53
+ Number of decoder layers.
54
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
57
+ Number of attention heads for each attention layer in the Transformer decoder.
58
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
59
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
60
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
61
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
62
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu_new"`):
63
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
64
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
65
+ dropout (`float`, *optional*, defaults to 0.1):
66
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
67
+ attention_dropout (`float`, *optional*, defaults to 0.0):
68
+ The dropout ratio for the attention probabilities.
69
+ activation_dropout (`float`, *optional*, defaults to 0.0):
70
+ The dropout ratio for activations inside the fully connected layer.
71
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
72
+ The dropout ratio for classifier.
73
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
74
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
75
+ just in case (e.g., 1024 or 2048 or 4096).
76
+ init_std (`float`, *optional*, defaults to 0.02):
77
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
78
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
79
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
80
+ for more details.
81
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
82
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
83
+ for more details.
84
+ use_cache (`bool`, *optional*, defaults to `True`):
85
+ Whether or not the model should return the last key/values attentions (not used by all models).
86
+ attention_type (`str`, *optional*, defaults to `"block_sparse"`):
87
+ Whether to use block sparse attention (with linear complexity) as introduced in the paper, or the original full
88
+ attention layer (with quadratic complexity) in the encoder. Possible values are `"original_full"` and `"block_sparse"`.
89
+ use_bias (`bool`, *optional*, defaults to `False`):
90
+ Whether to use bias in query, key, value.
91
+ block_size (`int`, *optional*, defaults to 64):
92
+ Size of each block. Useful only when `attention_type == "block_sparse"`.
93
+ num_random_blocks (`int`, *optional*, defaults to 3):
94
+ Each query will attend to this many random blocks. Useful only when `attention_type ==
95
+ "block_sparse"`.
96
+ scale_embedding (`bool`, *optional*, defaults to `True`):
97
+ Whether to rescale embeddings with (hidden_size ** 0.5).
98
+
99
+ Example:
100
+
101
+ ```python
102
+ >>> from transformers import BigBirdPegasusConfig, BigBirdPegasusModel
103
+
104
+ >>> # Initializing a BigBirdPegasus bigbird-pegasus-base style configuration
105
+ >>> configuration = BigBirdPegasusConfig()
106
+
107
+ >>> # Initializing a model (with random weights) from the bigbird-pegasus-base style configuration
108
+ >>> model = BigBirdPegasusModel(configuration)
109
+
110
+ >>> # Accessing the model configuration
111
+ >>> configuration = model.config
112
+ ```"""
113
+
114
+ model_type = "bigbird_pegasus"
115
+ keys_to_ignore_at_inference = ["past_key_values"]
116
+ attribute_map = {
117
+ "num_attention_heads": "encoder_attention_heads",
118
+ "hidden_size": "d_model",
119
+ "attention_probs_dropout_prob": "attention_dropout",
120
+ }
121
+
122
+ def __init__(
123
+ self,
124
+ vocab_size=96103,
125
+ max_position_embeddings=4096,
126
+ encoder_layers=16,
127
+ encoder_ffn_dim=4096,
128
+ encoder_attention_heads=16,
129
+ decoder_layers=16,
130
+ decoder_ffn_dim=4096,
131
+ decoder_attention_heads=16,
132
+ encoder_layerdrop=0.0,
133
+ decoder_layerdrop=0.0,
134
+ use_cache=True,
135
+ is_encoder_decoder=True,
136
+ activation_function="gelu_new",
137
+ d_model=1024,
138
+ dropout=0.1,
139
+ attention_dropout=0.0,
140
+ activation_dropout=0.0,
141
+ init_std=0.02,
142
+ decoder_start_token_id=2,
143
+ classifier_dropout=0.0,
144
+ scale_embedding=True,
145
+ pad_token_id=0,
146
+ bos_token_id=2,
147
+ eos_token_id=1,
148
+ attention_type="block_sparse", # only for encoder
149
+ block_size=64,
150
+ num_random_blocks=3,
151
+ use_bias=False,
152
+ **kwargs,
153
+ ):
154
+ self.vocab_size = vocab_size
155
+ self.max_position_embeddings = max_position_embeddings
156
+ self.d_model = d_model
157
+ self.encoder_ffn_dim = encoder_ffn_dim
158
+ self.encoder_layers = encoder_layers
159
+ self.encoder_attention_heads = encoder_attention_heads
160
+ self.decoder_ffn_dim = decoder_ffn_dim
161
+ self.decoder_layers = decoder_layers
162
+ self.decoder_attention_heads = decoder_attention_heads
163
+ self.dropout = dropout
164
+ self.attention_dropout = attention_dropout
165
+ self.activation_dropout = activation_dropout
166
+ self.activation_function = activation_function
167
+ self.init_std = init_std
168
+ self.encoder_layerdrop = encoder_layerdrop
169
+ self.decoder_layerdrop = decoder_layerdrop
170
+ self.classifier_dropout = classifier_dropout
171
+ self.use_cache = use_cache
172
+ self.num_hidden_layers = encoder_layers
173
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
174
+
175
+ # extra config
176
+ self.attention_type = attention_type
177
+ self.block_size = block_size
178
+ self.num_random_blocks = num_random_blocks
179
+ self.use_bias = use_bias
180
+
181
+ super().__init__(
182
+ pad_token_id=pad_token_id,
183
+ bos_token_id=bos_token_id,
184
+ eos_token_id=eos_token_id,
185
+ is_encoder_decoder=is_encoder_decoder,
186
+ decoder_start_token_id=decoder_start_token_id,
187
+ **kwargs,
188
+ )
189
+
190
+
191
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
192
+ class BigBirdPegasusOnnxConfig(OnnxSeq2SeqConfigWithPast):
193
+ @property
194
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
195
+ if self.task in ["default", "seq2seq-lm"]:
196
+ common_inputs = OrderedDict(
197
+ [
198
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
199
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
200
+ ]
201
+ )
202
+
203
+ if self.use_past:
204
+ common_inputs["decoder_input_ids"] = {0: "batch"}
205
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
206
+ else:
207
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
208
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
209
+
210
+ if self.use_past:
211
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
212
+ elif self.task == "causal-lm":
213
+ # TODO: figure this case out.
214
+ common_inputs = OrderedDict(
215
+ [
216
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
217
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
218
+ ]
219
+ )
220
+ if self.use_past:
221
+ num_encoder_layers, _ = self.num_layers
222
+ for i in range(num_encoder_layers):
223
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
224
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
225
+ else:
226
+ common_inputs = OrderedDict(
227
+ [
228
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
229
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
230
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
231
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
232
+ ]
233
+ )
234
+
235
+ return common_inputs
236
+
237
+ @property
238
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
239
+ if self.task in ["default", "seq2seq-lm"]:
240
+ common_outputs = super().outputs
241
+ else:
242
+ common_outputs = super(OnnxConfigWithPast, self).outputs
243
+ if self.use_past:
244
+ num_encoder_layers, _ = self.num_layers
245
+ for i in range(num_encoder_layers):
246
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
247
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
248
+ return common_outputs
249
+
250
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
251
+ self,
252
+ tokenizer: PreTrainedTokenizer,
253
+ batch_size: int = -1,
254
+ seq_length: int = -1,
255
+ is_pair: bool = False,
256
+ framework: Optional[TensorType] = None,
257
+ ) -> Mapping[str, Any]:
258
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
259
+ tokenizer, batch_size, seq_length, is_pair, framework
260
+ )
261
+
262
+ # Generate decoder inputs
263
+ decoder_seq_length = seq_length if not self.use_past else 1
264
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
265
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
266
+ )
267
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
268
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
269
+
270
+ if self.use_past:
271
+ if not is_torch_available():
272
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
273
+ else:
274
+ import torch
275
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
276
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
277
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
278
+ encoder_shape = (
279
+ batch,
280
+ num_encoder_attention_heads,
281
+ encoder_seq_length,
282
+ self._config.hidden_size // num_encoder_attention_heads,
283
+ )
284
+ decoder_past_length = decoder_seq_length + 3
285
+ decoder_shape = (
286
+ batch,
287
+ num_decoder_attention_heads,
288
+ decoder_past_length,
289
+ self._config.hidden_size // num_decoder_attention_heads,
290
+ )
291
+
292
+ common_inputs["decoder_attention_mask"] = torch.cat(
293
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
294
+ )
295
+
296
+ common_inputs["past_key_values"] = []
297
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
298
+ num_encoder_layers, num_decoder_layers = self.num_layers
299
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
300
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
301
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
302
+
303
+ for _ in range(min_num_layers):
304
+ common_inputs["past_key_values"].append(
305
+ (
306
+ torch.zeros(decoder_shape),
307
+ torch.zeros(decoder_shape),
308
+ torch.zeros(encoder_shape),
309
+ torch.zeros(encoder_shape),
310
+ )
311
+ )
312
+ # TODO: test this.
313
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
314
+ for _ in range(min_num_layers, max_num_layers):
315
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
316
+ return common_inputs
317
+
318
+ def _generate_dummy_inputs_for_causal_lm(
319
+ self,
320
+ tokenizer: PreTrainedTokenizer,
321
+ batch_size: int = -1,
322
+ seq_length: int = -1,
323
+ is_pair: bool = False,
324
+ framework: Optional[TensorType] = None,
325
+ ) -> Mapping[str, Any]:
326
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
327
+ tokenizer, batch_size, seq_length, is_pair, framework
328
+ )
329
+
330
+ if self.use_past:
331
+ if not is_torch_available():
332
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
333
+ else:
334
+ import torch
335
+ batch, seqlen = common_inputs["input_ids"].shape
336
+ # Not using the same length for past_key_values
337
+ past_key_values_length = seqlen + 2
338
+ num_encoder_layers, _ = self.num_layers
339
+ num_encoder_attention_heads, _ = self.num_attention_heads
340
+ past_shape = (
341
+ batch,
342
+ num_encoder_attention_heads,
343
+ past_key_values_length,
344
+ self._config.hidden_size // num_encoder_attention_heads,
345
+ )
346
+
347
+ mask_dtype = common_inputs["attention_mask"].dtype
348
+ common_inputs["attention_mask"] = torch.cat(
349
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
350
+ )
351
+ common_inputs["past_key_values"] = [
352
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
353
+ ]
354
+ return common_inputs
355
+
356
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
357
+ self,
358
+ tokenizer: PreTrainedTokenizer,
359
+ batch_size: int = -1,
360
+ seq_length: int = -1,
361
+ is_pair: bool = False,
362
+ framework: Optional[TensorType] = None,
363
+ ) -> Mapping[str, Any]:
364
+ # Copied from OnnxConfig.generate_dummy_inputs
365
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
366
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
367
+ batch_size = compute_effective_axis_dimension(
368
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
369
+ )
370
+
371
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
372
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
373
+ seq_length = compute_effective_axis_dimension(
374
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
375
+ )
376
+
377
+ # Generate dummy inputs according to compute batch and sequence
378
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
379
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
380
+ return common_inputs
381
+
382
+ def generate_dummy_inputs(
383
+ self,
384
+ tokenizer: PreTrainedTokenizer,
385
+ batch_size: int = -1,
386
+ seq_length: int = -1,
387
+ is_pair: bool = False,
388
+ framework: Optional[TensorType] = None,
389
+ ) -> Mapping[str, Any]:
390
+ if self.task in ["default", "seq2seq-lm"]:
391
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
392
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
393
+ )
394
+
395
+ elif self.task == "causal-lm":
396
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
397
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
398
+ )
399
+ else:
400
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
401
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
402
+ )
403
+
404
+ return common_inputs
405
+
406
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
407
+ if self.task in ["default", "seq2seq-lm"]:
408
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
409
+ else:
410
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
411
+ flattened_output, name, idx, t
412
+ )
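The `attribute_map` on `BigBirdPegasusConfig` aliases the BERT-style attribute names onto the seq2seq fields, which is what the ONNX config relies on when it reads `self._config.hidden_size`. A small sketch, assuming a working `transformers` installation; the numbers are the defaults listed in the class above:

```python
from transformers import BigBirdPegasusConfig

config = BigBirdPegasusConfig()
# attribute_map: hidden_size -> d_model, num_attention_heads -> encoder_attention_heads,
# attention_probs_dropout_prob -> attention_dropout
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16
assert config.attention_probs_dropout_prob == config.attention_dropout == 0.0
```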
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ from typing import Dict
18
+
19
+ import tensorflow as tf
20
+ import torch
21
+ from tqdm import tqdm
22
+
23
+ from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
24
+
25
+
26
+ INIT_COMMON = [
27
+ # tf -> hf
28
+ ("/", "."),
29
+ ("layer_", "layers."),
30
+ ("kernel", "weight"),
31
+ ("beta", "bias"),
32
+ ("gamma", "weight"),
33
+ ("pegasus", "model"),
34
+ ]
35
+ END_COMMON = [
36
+ (".output.dense", ".fc2"),
37
+ ("intermediate.LayerNorm", "final_layer_norm"),
38
+ ("intermediate.dense", "fc1"),
39
+ ]
40
+
41
+ DECODER_PATTERNS = (
42
+ INIT_COMMON
43
+ + [
44
+ ("attention.self.LayerNorm", "self_attn_layer_norm"),
45
+ ("attention.output.dense", "self_attn.out_proj"),
46
+ ("attention.self", "self_attn"),
47
+ ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
48
+ ("attention.encdec_output.dense", "encoder_attn.out_proj"),
49
+ ("attention.encdec", "encoder_attn"),
50
+ ("key", "k_proj"),
51
+ ("value", "v_proj"),
52
+ ("query", "q_proj"),
53
+ ("decoder.LayerNorm", "decoder.layernorm_embedding"),
54
+ ]
55
+ + END_COMMON
56
+ )
57
+
58
+ REMAINING_PATTERNS = (
59
+ INIT_COMMON
60
+ + [
61
+ ("embeddings.word_embeddings", "shared.weight"),
62
+ ("embeddings.position_embeddings", "embed_positions.weight"),
63
+ ("attention.self.LayerNorm", "self_attn_layer_norm"),
64
+ ("attention.output.dense", "self_attn.output"),
65
+ ("attention.self", "self_attn.self"),
66
+ ("encoder.LayerNorm", "encoder.layernorm_embedding"),
67
+ ]
68
+ + END_COMMON
69
+ )
70
+
71
+ KEYS_TO_IGNORE = [
72
+ "encdec/key/bias",
73
+ "encdec/query/bias",
74
+ "encdec/value/bias",
75
+ "self/key/bias",
76
+ "self/query/bias",
77
+ "self/value/bias",
78
+ "encdec_output/dense/bias",
79
+ "attention/output/dense/bias",
80
+ ]
81
+
82
+
83
+ def rename_state_dict_key(k, patterns):
84
+ for tf_name, hf_name in patterns:
85
+ k = k.replace(tf_name, hf_name)
86
+ return k
87
+
88
+
89
+ def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
90
+ cfg = BigBirdPegasusConfig(**config_update)
91
+ torch_model = BigBirdPegasusForConditionalGeneration(cfg)
92
+ state_dict = torch_model.state_dict()
93
+ mapping = {}
94
+
95
+ # separating decoder weights
96
+ decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
97
+ remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
98
+
99
+ for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
100
+ conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
101
+ if any(conditions):
102
+ continue
103
+ patterns = DECODER_PATTERNS
104
+ new_k = rename_state_dict_key(k, patterns)
105
+ if new_k not in state_dict:
106
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
107
+ if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
108
+ v = v.T
109
+ mapping[new_k] = torch.from_numpy(v)
110
+ assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
111
+
112
+ for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
113
+ conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
114
+ if any(conditions):
115
+ continue
116
+ patterns = REMAINING_PATTERNS
117
+ new_k = rename_state_dict_key(k, patterns)
118
+ if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
119
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
120
+ if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
121
+ v = v.T
122
+ mapping[new_k] = torch.from_numpy(v)
123
+ if k != "pegasus/embeddings/position_embeddings":
124
+ assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
125
+
126
+ mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
127
+ mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
128
+ missing, extra = torch_model.load_state_dict(mapping, strict=False)
129
+ unexpected_missing = [
130
+ k
131
+ for k in missing
132
+ if k
133
+ not in [
134
+ "final_logits_bias",
135
+ "model.encoder.embed_tokens.weight",
136
+ "model.decoder.embed_tokens.weight",
137
+ "lm_head.weight",
138
+ ]
139
+ ]
140
+ assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
141
+ assert extra == [], f"no matches found for the following tf keys {extra}"
142
+ return torch_model
143
+
144
+
145
+ def get_tf_weights_as_numpy(path) -> Dict:
146
+ init_vars = tf.train.list_variables(path)
147
+ tf_weights = {}
148
+ ignore_name = ["global_step"]
149
+ for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
150
+ skip_key = any(pat in name for pat in ignore_name)
151
+ if skip_key:
152
+ continue
153
+ array = tf.train.load_variable(path, name)
154
+ tf_weights[name] = array
155
+ return tf_weights
156
+
157
+
158
+ def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
159
+ tf_weights = get_tf_weights_as_numpy(ckpt_path)
160
+ torch_model = convert_bigbird_pegasus(tf_weights, config_update)
161
+ torch_model.save_pretrained(save_dir)
162
+
163
+
164
+ if __name__ == "__main__":
165
+ parser = argparse.ArgumentParser()
166
+ parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
167
+ parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
168
+ args = parser.parse_args()
169
+ config_update = {}
170
+ convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
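A minimal usage sketch for the conversion entry point defined above (paths are placeholders; TensorFlow and PyTorch must both be installed):

from transformers.models.bigbird_pegasus.convert_bigbird_pegasus_tf_to_pytorch import (
    convert_bigbird_pegasus_ckpt_to_pytorch,
)

# Placeholder paths: point these at a real TF checkpoint and an output directory.
tf_ckpt_path = "/path/to/bigbird_pegasus_tf_ckpt"
save_dir = "/path/to/bigbird_pegasus_pt"

# config_update overrides BigBirdPegasusConfig defaults; an empty dict keeps them as-is.
convert_bigbird_pegasus_ckpt_to_pytorch(tf_ckpt_path, save_dir, config_update={})

Equivalently, the script can be run directly with --tf_ckpt_path and --save_dir, as wired up in the __main__ block above.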
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.39 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bridgetower/__pycache__/image_processing_bridgetower.cpython-310.pyc ADDED
Binary file (21.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__init__.py ADDED
@@ -0,0 +1,88 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_mobilenet_v2": [
21
+ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "MobileNetV2Config",
23
+ "MobileNetV2OnnxConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_vision_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
34
+ _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
35
+
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_mobilenet_v2"] = [
44
+ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "MobileNetV2ForImageClassification",
46
+ "MobileNetV2ForSemanticSegmentation",
47
+ "MobileNetV2Model",
48
+ "MobileNetV2PreTrainedModel",
49
+ "load_tf_weights_in_mobilenet_v2",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_mobilenet_v2 import (
55
+ MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
56
+ MobileNetV2Config,
57
+ MobileNetV2OnnxConfig,
58
+ )
59
+
60
+ try:
61
+ if not is_vision_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
67
+ from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
68
+
69
+ try:
70
+ if not is_torch_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .modeling_mobilenet_v2 import (
76
+ MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
77
+ MobileNetV2ForImageClassification,
78
+ MobileNetV2ForSemanticSegmentation,
79
+ MobileNetV2Model,
80
+ MobileNetV2PreTrainedModel,
81
+ load_tf_weights_in_mobilenet_v2,
82
+ )
83
+
84
+
85
+ else:
86
+ import sys
87
+
88
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
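A short sketch of what this lazy import structure enables (assuming transformers is installed): the configuration class resolves immediately, while the torch- and vision-dependent modules are only imported when first accessed.

from transformers import MobileNetV2Config

# Only the configuration module is loaded here; modeling and image-processing modules load lazily on first use.
config = MobileNetV2Config(depth_multiplier=0.75, image_size=160)
print(config.model_type)  # "mobilenet_v2"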
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/configuration_mobilenet_v2.cpython-310.pyc ADDED
Binary file (6.53 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/feature_extraction_mobilenet_v2.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/image_processing_mobilenet_v2.cpython-310.pyc ADDED
Binary file (14.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/__pycache__/modeling_mobilenet_v2.cpython-310.pyc ADDED
Binary file (22 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py ADDED
@@ -0,0 +1,154 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileNetV2 model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class MobileNetV2Config(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`MobileNetV2Model`]. It is used to instantiate a
36
+ MobileNetV2 model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the MobileNetV2
38
+ [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ num_channels (`int`, *optional*, defaults to 3):
45
+ The number of input channels.
46
+ image_size (`int`, *optional*, defaults to 224):
47
+ The size (resolution) of each image.
48
+ depth_multiplier (`float`, *optional*, defaults to 1.0):
49
+ Shrinks or expands the number of channels in each layer. Default is 1.0, which starts the network with 32
50
+ channels. This is sometimes also called "alpha" or "width multiplier".
51
+ depth_divisible_by (`int`, *optional*, defaults to 8):
52
+ The number of channels in each layer will always be a multiple of this number.
53
+ min_depth (`int`, *optional*, defaults to 8):
54
+ All layers will have at least this many channels.
55
+ expand_ratio (`float`, *optional*, defaults to 6.0):
56
+ The number of output channels of the first layer in each block is input channels times expansion ratio.
57
+ output_stride (`int`, *optional*, defaults to 32):
58
+ The ratio between the spatial resolution of the input and output feature maps. By default the model reduces
59
+ the input dimensions by a factor of 32. If `output_stride` is 8 or 16, the model uses dilated convolutions
60
+ on the depthwise layers instead of regular convolutions, so that the feature maps never become more than 8x
61
+ or 16x smaller than the input image.
62
+ first_layer_is_expansion (`bool`, *optional*, defaults to `True`):
63
+ True if the very first convolution layer is also the expansion layer for the first expansion block.
64
+ finegrained_output (`bool`, *optional*, defaults to `True`):
65
+ If true, the number of output channels in the final convolution layer will stay large (1280) even if
66
+ `depth_multiplier` is less than 1.
67
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu6"`):
68
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
69
+ tf_padding (`bool`, *optional*, defaults to `True`):
70
+ Whether to use TensorFlow padding rules on the convolution layers.
71
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.8):
72
+ The dropout ratio for attached classifiers.
73
+ initializer_range (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+ layer_norm_eps (`float`, *optional*, defaults to 0.001):
76
+ The epsilon used by the layer normalization layers.
77
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
78
+ The index that is ignored by the loss function of the semantic segmentation model.
79
+
80
+ Example:
81
+
82
+ ```python
83
+ >>> from transformers import MobileNetV2Config, MobileNetV2Model
84
+
85
+ >>> # Initializing a "mobilenet_v2_1.0_224" style configuration
86
+ >>> configuration = MobileNetV2Config()
87
+
88
+ >>> # Initializing a model from the "mobilenet_v2_1.0_224" style configuration
89
+ >>> model = MobileNetV2Model(configuration)
90
+
91
+ >>> # Accessing the model configuration
92
+ >>> configuration = model.config
93
+ ```"""
94
+
95
+ model_type = "mobilenet_v2"
96
+
97
+ def __init__(
98
+ self,
99
+ num_channels=3,
100
+ image_size=224,
101
+ depth_multiplier=1.0,
102
+ depth_divisible_by=8,
103
+ min_depth=8,
104
+ expand_ratio=6.0,
105
+ output_stride=32,
106
+ first_layer_is_expansion=True,
107
+ finegrained_output=True,
108
+ hidden_act="relu6",
109
+ tf_padding=True,
110
+ classifier_dropout_prob=0.8,
111
+ initializer_range=0.02,
112
+ layer_norm_eps=0.001,
113
+ semantic_loss_ignore_index=255,
114
+ **kwargs,
115
+ ):
116
+ super().__init__(**kwargs)
117
+
118
+ if depth_multiplier <= 0:
119
+ raise ValueError("depth_multiplier must be greater than zero.")
120
+
121
+ self.num_channels = num_channels
122
+ self.image_size = image_size
123
+ self.depth_multiplier = depth_multiplier
124
+ self.depth_divisible_by = depth_divisible_by
125
+ self.min_depth = min_depth
126
+ self.expand_ratio = expand_ratio
127
+ self.output_stride = output_stride
128
+ self.first_layer_is_expansion = first_layer_is_expansion
129
+ self.finegrained_output = finegrained_output
130
+ self.hidden_act = hidden_act
131
+ self.tf_padding = tf_padding
132
+ self.classifier_dropout_prob = classifier_dropout_prob
133
+ self.initializer_range = initializer_range
134
+ self.layer_norm_eps = layer_norm_eps
135
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
136
+
137
+
138
+ class MobileNetV2OnnxConfig(OnnxConfig):
139
+ torch_onnx_minimum_version = version.parse("1.11")
140
+
141
+ @property
142
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
143
+ return OrderedDict([("pixel_values", {0: "batch"})])
144
+
145
+ @property
146
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
147
+ if self.task == "image-classification":
148
+ return OrderedDict([("logits", {0: "batch"})])
149
+ else:
150
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
151
+
152
+ @property
153
+ def atol_for_validation(self) -> float:
154
+ return 1e-4
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/convert_original_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,178 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileNetV2 checkpoints from the tensorflow/models library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import re
21
+ from pathlib import Path
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import (
29
+ MobileNetV2Config,
30
+ MobileNetV2ForImageClassification,
31
+ MobileNetV2ForSemanticSegmentation,
32
+ MobileNetV2ImageProcessor,
33
+ load_tf_weights_in_mobilenet_v2,
34
+ )
35
+ from transformers.utils import logging
36
+
37
+
38
+ logging.set_verbosity_info()
39
+ logger = logging.get_logger(__name__)
40
+
41
+
42
+ def get_mobilenet_v2_config(model_name):
43
+ config = MobileNetV2Config(layer_norm_eps=0.001)
44
+
45
+ if "quant" in model_name:
46
+ raise ValueError("Quantized models are not supported.")
47
+
48
+ matches = re.match(r"^.*mobilenet_v2_([^_]*)_([^_]*)$", model_name)
49
+ if matches:
50
+ config.depth_multiplier = float(matches[1])
51
+ config.image_size = int(matches[2])
52
+
53
+ if model_name.startswith("deeplabv3_"):
54
+ config.output_stride = 8
55
+ config.num_labels = 21
56
+ filename = "pascal-voc-id2label.json"
57
+ else:
58
+ # The TensorFlow version of MobileNetV2 predicts 1001 classes instead
59
+ # of the usual 1000. The first class (index 0) is "background".
60
+ config.num_labels = 1001
61
+ filename = "imagenet-1k-id2label.json"
62
+
63
+ repo_id = "huggingface/label-files"
64
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
65
+
66
+ if config.num_labels == 1001:
67
+ id2label = {int(k) + 1: v for k, v in id2label.items()}
68
+ id2label[0] = "background"
69
+ else:
70
+ id2label = {int(k): v for k, v in id2label.items()}
71
+
72
+ config.id2label = id2label
73
+ config.label2id = {v: k for k, v in id2label.items()}
74
+
75
+ return config
76
+
77
+
78
+ # We will verify our results on an image of cute cats
79
+ def prepare_img():
80
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
81
+ im = Image.open(requests.get(url, stream=True).raw)
82
+ return im
83
+
84
+
85
+ @torch.no_grad()
86
+ def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
87
+ """
88
+ Copy/paste/tweak model's weights to our MobileNetV2 structure.
89
+ """
90
+ config = get_mobilenet_v2_config(model_name)
91
+
92
+ # Load 🤗 model
93
+ if model_name.startswith("deeplabv3_"):
94
+ model = MobileNetV2ForSemanticSegmentation(config).eval()
95
+ else:
96
+ model = MobileNetV2ForImageClassification(config).eval()
97
+
98
+ # Load weights from TensorFlow checkpoint
99
+ load_tf_weights_in_mobilenet_v2(model, config, checkpoint_path)
100
+
101
+ # Check outputs on an image, prepared by MobileNetV2ImageProcessor
102
+ image_processor = MobileNetV2ImageProcessor(
103
+ crop_size={"width": config.image_size, "height": config.image_size},
104
+ size={"shortest_edge": config.image_size + 32},
105
+ )
106
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
107
+ outputs = model(**encoding)
108
+ logits = outputs.logits
109
+
110
+ if model_name.startswith("deeplabv3_"):
111
+ assert logits.shape == (1, 21, 65, 65)
112
+
113
+ if model_name == "deeplabv3_mobilenet_v2_1.0_513":
114
+ expected_logits = torch.tensor(
115
+ [
116
+ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
117
+ [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
118
+ [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
119
+ ]
120
+ )
121
+
122
+ else:
123
+ raise ValueError(f"Unknown model name: {model_name}")
124
+
125
+ assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
126
+ else:
127
+ assert logits.shape == (1, 1001)
128
+
129
+ if model_name == "mobilenet_v2_1.4_224":
130
+ expected_logits = torch.tensor([0.0181, -1.0015, 0.4688])
131
+ elif model_name == "mobilenet_v2_1.0_224":
132
+ expected_logits = torch.tensor([0.2445, -1.1993, 0.1905])
133
+ elif model_name == "mobilenet_v2_0.75_160":
134
+ expected_logits = torch.tensor([0.2482, 0.4136, 0.6669])
135
+ elif model_name == "mobilenet_v2_0.35_96":
136
+ expected_logits = torch.tensor([0.1451, -0.4624, 0.7192])
137
+ else:
138
+ expected_logits = None
139
+
140
+ if expected_logits is not None:
141
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
142
+
143
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
144
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
145
+ model.save_pretrained(pytorch_dump_folder_path)
146
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
147
+ image_processor.save_pretrained(pytorch_dump_folder_path)
148
+
149
+ if push_to_hub:
150
+ print("Pushing to the hub...")
151
+ repo_id = "google/" + model_name
152
+ image_processor.push_to_hub(repo_id)
153
+ model.push_to_hub(repo_id)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ parser = argparse.ArgumentParser()
158
+ # Required parameters
159
+ parser.add_argument(
160
+ "--model_name",
161
+ default="mobilenet_v2_1.0_224",
162
+ type=str,
163
+ help="Name of the MobileNetV2 model you'd like to convert. Should in the form 'mobilenet_v2_<depth>_<size>'.",
164
+ )
165
+ parser.add_argument(
166
+ "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
167
+ )
168
+ parser.add_argument(
169
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
170
+ )
171
+ parser.add_argument(
172
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
173
+ )
174
+
175
+ args = parser.parse_args()
176
+ convert_movilevit_checkpoint(
177
+ args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
178
+ )
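A minimal usage sketch of the converter above (placeholder paths; requires TensorFlow, PyTorch, Pillow, and network access for the label files pulled from the Hub):

from transformers.models.mobilenet_v2.convert_original_tf_checkpoint_to_pytorch import (
    convert_movilevit_checkpoint,
)

convert_movilevit_checkpoint(
    model_name="mobilenet_v2_1.0_224",
    checkpoint_path="/path/to/mobilenet_v2_1.0_224.ckpt",      # placeholder
    pytorch_dump_folder_path="/path/to/mobilenet_v2_1.0_224",  # placeholder
    push_to_hub=False,
)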
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for MobileNetV2."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class MobileNetV2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use MobileNetV2ImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py ADDED
@@ -0,0 +1,373 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for MobileNetV2."""
16
+
17
+ from typing import Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_STANDARD_MEAN,
29
+ IMAGENET_STANDARD_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
42
+
43
+
44
+ if is_torch_available():
45
+ import torch
46
+
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ class MobileNetV2ImageProcessor(BaseImageProcessor):
52
+ r"""
53
+ Constructs a MobileNetV2 image processor.
54
+
55
+ Args:
56
+ do_resize (`bool`, *optional*, defaults to `True`):
57
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
58
+ `do_resize` in the `preprocess` method.
59
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 256}`):
60
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
61
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
62
+ method.
63
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
64
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
65
+ `preprocess` method.
66
+ do_center_crop (`bool`, *optional*, defaults to `True`):
67
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
68
+ is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
69
+ `preprocess` method.
70
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
71
+ Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
72
+ Can be overridden by the `crop_size` parameter in the `preprocess` method.
73
+ do_rescale (`bool`, *optional*, defaults to `True`):
74
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
75
+ parameter in the `preprocess` method.
76
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
77
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
78
+ `preprocess` method.
79
+ do_normalize (`bool`, *optional*, defaults to `True`):
80
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
81
+ method.
82
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
83
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
84
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
85
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
86
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
87
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
88
+ """
89
+
90
+ model_input_names = ["pixel_values"]
91
+
92
+ def __init__(
93
+ self,
94
+ do_resize: bool = True,
95
+ size: Optional[Dict[str, int]] = None,
96
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
97
+ do_center_crop: bool = True,
98
+ crop_size: Dict[str, int] = None,
99
+ do_rescale: bool = True,
100
+ rescale_factor: Union[int, float] = 1 / 255,
101
+ do_normalize: bool = True,
102
+ image_mean: Optional[Union[float, List[float]]] = None,
103
+ image_std: Optional[Union[float, List[float]]] = None,
104
+ **kwargs,
105
+ ) -> None:
106
+ super().__init__(**kwargs)
107
+ size = size if size is not None else {"shortest_edge": 256}
108
+ size = get_size_dict(size, default_to_square=False)
109
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
110
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
111
+ self.do_resize = do_resize
112
+ self.size = size
113
+ self.resample = resample
114
+ self.do_center_crop = do_center_crop
115
+ self.crop_size = crop_size
116
+ self.do_rescale = do_rescale
117
+ self.rescale_factor = rescale_factor
118
+ self.do_normalize = do_normalize
119
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
120
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
121
+ self._valid_processor_keys = [
122
+ "images",
123
+ "do_resize",
124
+ "size",
125
+ "resample",
126
+ "do_center_crop",
127
+ "crop_size",
128
+ "do_rescale",
129
+ "rescale_factor",
130
+ "do_normalize",
131
+ "image_mean",
132
+ "image_std",
133
+ "return_tensors",
134
+ "data_format",
135
+ "input_data_format",
136
+ ]
137
+
138
+ # Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize
139
+ def resize(
140
+ self,
141
+ image: np.ndarray,
142
+ size: Dict[str, int],
143
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
144
+ data_format: Optional[Union[str, ChannelDimension]] = None,
145
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
146
+ **kwargs,
147
+ ) -> np.ndarray:
148
+ """
149
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
150
+ resized to keep the input aspect ratio.
151
+
152
+ Args:
153
+ image (`np.ndarray`):
154
+ Image to resize.
155
+ size (`Dict[str, int]`):
156
+ Size of the output image.
157
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
158
+ Resampling filter to use when resizing the image.
159
+ data_format (`str` or `ChannelDimension`, *optional*):
160
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
161
+ input_data_format (`ChannelDimension` or `str`, *optional*):
162
+ The channel dimension format of the input image. If not provided, it will be inferred.
163
+ """
164
+ default_to_square = True
165
+ if "shortest_edge" in size:
166
+ size = size["shortest_edge"]
167
+ default_to_square = False
168
+ elif "height" in size and "width" in size:
169
+ size = (size["height"], size["width"])
170
+ else:
171
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
172
+
173
+ output_size = get_resize_output_image_size(
174
+ image,
175
+ size=size,
176
+ default_to_square=default_to_square,
177
+ input_data_format=input_data_format,
178
+ )
179
+ return resize(
180
+ image,
181
+ size=output_size,
182
+ resample=resample,
183
+ data_format=data_format,
184
+ input_data_format=input_data_format,
185
+ **kwargs,
186
+ )
187
+
188
+ def preprocess(
189
+ self,
190
+ images: ImageInput,
191
+ do_resize: Optional[bool] = None,
192
+ size: Dict[str, int] = None,
193
+ resample: PILImageResampling = None,
194
+ do_center_crop: bool = None,
195
+ crop_size: Dict[str, int] = None,
196
+ do_rescale: Optional[bool] = None,
197
+ rescale_factor: Optional[float] = None,
198
+ do_normalize: Optional[bool] = None,
199
+ image_mean: Optional[Union[float, List[float]]] = None,
200
+ image_std: Optional[Union[float, List[float]]] = None,
201
+ return_tensors: Optional[Union[str, TensorType]] = None,
202
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
203
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
204
+ **kwargs,
205
+ ):
206
+ """
207
+ Preprocess an image or batch of images.
208
+
209
+ Args:
210
+ images (`ImageInput`):
211
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
212
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
213
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
214
+ Whether to resize the image.
215
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
216
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
217
+ the longest edge resized to keep the input aspect ratio.
218
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
219
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
220
+ an effect if `do_resize` is set to `True`.
221
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
222
+ Whether to center crop the image.
223
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
224
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
225
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
226
+ Whether to rescale the image values between [0 - 1].
227
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
228
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
229
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
230
+ Whether to normalize the image.
231
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
232
+ Image mean to use if `do_normalize` is set to `True`.
233
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
234
+ Image standard deviation to use if `do_normalize` is set to `True`.
235
+ return_tensors (`str` or `TensorType`, *optional*):
236
+ The type of tensors to return. Can be one of:
237
+ - Unset: Return a list of `np.ndarray`.
238
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
239
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
240
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
241
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
242
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
243
+ The channel dimension format for the output image. Can be one of:
244
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
245
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
246
+ - Unset: Use the channel dimension format of the input image.
247
+ input_data_format (`ChannelDimension` or `str`, *optional*):
248
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
249
+ from the input image. Can be one of:
250
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
251
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
252
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
253
+ """
254
+ do_resize = do_resize if do_resize is not None else self.do_resize
255
+ size = size if size is not None else self.size
256
+ size = get_size_dict(size, default_to_square=False)
257
+ resample = resample if resample is not None else self.resample
258
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
259
+ crop_size = crop_size if crop_size is not None else self.crop_size
260
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
261
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
262
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
263
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
264
+ image_mean = image_mean if image_mean is not None else self.image_mean
265
+ image_std = image_std if image_std is not None else self.image_std
266
+
267
+ images = make_list_of_images(images)
268
+
269
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
270
+
271
+ if not valid_images(images):
272
+ raise ValueError(
273
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
274
+ "torch.Tensor, tf.Tensor or jax.ndarray."
275
+ )
276
+ validate_preprocess_arguments(
277
+ do_rescale=do_rescale,
278
+ rescale_factor=rescale_factor,
279
+ do_normalize=do_normalize,
280
+ image_mean=image_mean,
281
+ image_std=image_std,
282
+ do_center_crop=do_center_crop,
283
+ crop_size=crop_size,
284
+ do_resize=do_resize,
285
+ size=size,
286
+ resample=resample,
287
+ )
288
+ # All transformations expect numpy arrays.
289
+ images = [to_numpy_array(image) for image in images]
290
+
291
+ if is_scaled_image(images[0]) and do_rescale:
292
+ logger.warning_once(
293
+ "It looks like you are trying to rescale already rescaled images. If the input"
294
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
295
+ )
296
+
297
+ if input_data_format is None:
298
+ # We assume that all images have the same channel dimension format.
299
+ input_data_format = infer_channel_dimension_format(images[0])
300
+
301
+ if do_resize:
302
+ images = [
303
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
304
+ for image in images
305
+ ]
306
+
307
+ if do_center_crop:
308
+ images = [
309
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
310
+ ]
311
+
312
+ if do_rescale:
313
+ images = [
314
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
315
+ for image in images
316
+ ]
317
+
318
+ if do_normalize:
319
+ images = [
320
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
321
+ for image in images
322
+ ]
323
+
324
+ images = [
325
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
326
+ ]
327
+
328
+ data = {"pixel_values": images}
329
+ return BatchFeature(data=data, tensor_type=return_tensors)
330
+
331
+ # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileNetV2
332
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
333
+ """
334
+ Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
335
+
336
+ Args:
337
+ outputs ([`MobileNetV2ForSemanticSegmentation`]):
338
+ Raw outputs of the model.
339
+ target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
340
+ List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
341
+ predictions will not be resized.
342
+
343
+ Returns:
344
+ semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
345
+ segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
346
+ specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
347
+ """
348
+ # TODO: add support for other frameworks
349
+ logits = outputs.logits
350
+
351
+ # Resize logits and compute semantic segmentation maps
352
+ if target_sizes is not None:
353
+ if len(logits) != len(target_sizes):
354
+ raise ValueError(
355
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
356
+ )
357
+
358
+ if is_torch_tensor(target_sizes):
359
+ target_sizes = target_sizes.numpy()
360
+
361
+ semantic_segmentation = []
362
+
363
+ for idx in range(len(logits)):
364
+ resized_logits = torch.nn.functional.interpolate(
365
+ logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
366
+ )
367
+ semantic_map = resized_logits[0].argmax(dim=0)
368
+ semantic_segmentation.append(semantic_map)
369
+ else:
370
+ semantic_segmentation = logits.argmax(dim=1)
371
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
372
+
373
+ return semantic_segmentation
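A minimal preprocessing sketch using the defaults defined above (shortest edge resized to 256, then a 224x224 center crop); calling the processor instance dispatches to the preprocess method shown here, and return_tensors="pt" requires PyTorch:

import numpy as np
from transformers import MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor()

# Dummy HWC uint8 image; a PIL.Image or torch.Tensor should work the same way.
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])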
llmeval-env/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py ADDED
@@ -0,0 +1,862 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MobileNetV2 model."""
16
+
17
+
18
+ from typing import Optional, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BaseModelOutputWithPoolingAndNoAttention,
27
+ ImageClassifierOutputWithNoAttention,
28
+ SemanticSegmenterOutput,
29
+ )
30
+ from ...modeling_utils import PreTrainedModel
31
+ from ...utils import (
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_mobilenet_v2 import MobileNetV2Config
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+
44
+ # General docstring
45
+ _CONFIG_FOR_DOC = "MobileNetV2Config"
46
+
47
+ # Base docstring
48
+ _CHECKPOINT_FOR_DOC = "google/mobilenet_v2_1.0_224"
49
+ _EXPECTED_OUTPUT_SHAPE = [1, 1280, 7, 7]
50
+
51
+ # Image classification docstring
52
+ _IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v2_1.0_224"
53
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
54
+
55
+
56
+ from ..deprecated._archive_maps import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ def _build_tf_to_pytorch_map(model, config, tf_weights=None):
60
+ """
61
+ A map of modules from TF to PyTorch.
62
+ """
63
+
64
+ tf_to_pt_map = {}
65
+
66
+ if isinstance(model, (MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)):
67
+ backbone = model.mobilenet_v2
68
+ else:
69
+ backbone = model
70
+
71
+ # Use the EMA weights if available
72
+ def ema(x):
73
+ return x + "/ExponentialMovingAverage" if x + "/ExponentialMovingAverage" in tf_weights else x
74
+
75
+ prefix = "MobilenetV2/Conv/"
76
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.first_conv.convolution.weight
77
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.first_conv.normalization.bias
78
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.first_conv.normalization.weight
79
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.first_conv.normalization.running_mean
80
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.first_conv.normalization.running_var
81
+
82
+ prefix = "MobilenetV2/expanded_conv/depthwise/"
83
+ tf_to_pt_map[ema(prefix + "depthwise_weights")] = backbone.conv_stem.conv_3x3.convolution.weight
84
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.conv_3x3.normalization.bias
85
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.conv_3x3.normalization.weight
86
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.conv_3x3.normalization.running_mean
87
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.conv_3x3.normalization.running_var
88
+
89
+ prefix = "MobilenetV2/expanded_conv/project/"
90
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_stem.reduce_1x1.convolution.weight
91
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_stem.reduce_1x1.normalization.bias
92
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_stem.reduce_1x1.normalization.weight
93
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.reduce_1x1.normalization.running_mean
94
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.reduce_1x1.normalization.running_var
95
+
96
+ for i in range(16):
97
+ tf_index = i + 1
98
+ pt_index = i
99
+ pointer = backbone.layer[pt_index]
100
+
101
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/expand/"
102
+ tf_to_pt_map[ema(prefix + "weights")] = pointer.expand_1x1.convolution.weight
103
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.expand_1x1.normalization.bias
104
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.expand_1x1.normalization.weight
105
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.expand_1x1.normalization.running_mean
106
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.expand_1x1.normalization.running_var
107
+
108
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/depthwise/"
109
+ tf_to_pt_map[ema(prefix + "depthwise_weights")] = pointer.conv_3x3.convolution.weight
110
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.conv_3x3.normalization.bias
111
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.conv_3x3.normalization.weight
112
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.conv_3x3.normalization.running_mean
113
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.conv_3x3.normalization.running_var
114
+
115
+ prefix = f"MobilenetV2/expanded_conv_{tf_index}/project/"
116
+ tf_to_pt_map[ema(prefix + "weights")] = pointer.reduce_1x1.convolution.weight
117
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = pointer.reduce_1x1.normalization.bias
118
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = pointer.reduce_1x1.normalization.weight
119
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.reduce_1x1.normalization.running_mean
120
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.reduce_1x1.normalization.running_var
121
+
122
+ prefix = "MobilenetV2/Conv_1/"
123
+ tf_to_pt_map[ema(prefix + "weights")] = backbone.conv_1x1.convolution.weight
124
+ tf_to_pt_map[ema(prefix + "BatchNorm/beta")] = backbone.conv_1x1.normalization.bias
125
+ tf_to_pt_map[ema(prefix + "BatchNorm/gamma")] = backbone.conv_1x1.normalization.weight
126
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_1x1.normalization.running_mean
127
+ tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_1x1.normalization.running_var
128
+
129
+ if isinstance(model, MobileNetV2ForImageClassification):
130
+ prefix = "MobilenetV2/Logits/Conv2d_1c_1x1/"
131
+ tf_to_pt_map[ema(prefix + "weights")] = model.classifier.weight
132
+ tf_to_pt_map[ema(prefix + "biases")] = model.classifier.bias
133
+
134
+ if isinstance(model, MobileNetV2ForSemanticSegmentation):
135
+ prefix = "image_pooling/"
136
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_pool.convolution.weight
137
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_pool.normalization.bias
138
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_pool.normalization.weight
139
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_pool.normalization.running_mean
140
+ tf_to_pt_map[
141
+ prefix + "BatchNorm/moving_variance"
142
+ ] = model.segmentation_head.conv_pool.normalization.running_var
143
+
144
+ prefix = "aspp0/"
145
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_aspp.convolution.weight
146
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_aspp.normalization.bias
147
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_aspp.normalization.weight
148
+ tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = model.segmentation_head.conv_aspp.normalization.running_mean
149
+ tf_to_pt_map[
150
+ prefix + "BatchNorm/moving_variance"
151
+ ] = model.segmentation_head.conv_aspp.normalization.running_var
152
+
153
+ prefix = "concat_projection/"
154
+ tf_to_pt_map[prefix + "weights"] = model.segmentation_head.conv_projection.convolution.weight
155
+ tf_to_pt_map[prefix + "BatchNorm/beta"] = model.segmentation_head.conv_projection.normalization.bias
156
+ tf_to_pt_map[prefix + "BatchNorm/gamma"] = model.segmentation_head.conv_projection.normalization.weight
157
+ tf_to_pt_map[
158
+ prefix + "BatchNorm/moving_mean"
159
+ ] = model.segmentation_head.conv_projection.normalization.running_mean
160
+ tf_to_pt_map[
161
+ prefix + "BatchNorm/moving_variance"
162
+ ] = model.segmentation_head.conv_projection.normalization.running_var
163
+
164
+ prefix = "logits/semantic/"
165
+ tf_to_pt_map[ema(prefix + "weights")] = model.segmentation_head.classifier.convolution.weight
166
+ tf_to_pt_map[ema(prefix + "biases")] = model.segmentation_head.classifier.convolution.bias
167
+
168
+ return tf_to_pt_map
169
+
170
+
171
+ def load_tf_weights_in_mobilenet_v2(model, config, tf_checkpoint_path):
172
+ """Load TensorFlow checkpoints in a PyTorch model."""
173
+ try:
174
+ import numpy as np
175
+ import tensorflow as tf
176
+ except ImportError:
177
+ logger.error(
178
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
179
+ "https://www.tensorflow.org/install/ for installation instructions."
180
+ )
181
+ raise
182
+
183
+ # Load weights from TF model
184
+ init_vars = tf.train.list_variables(tf_checkpoint_path)
185
+ tf_weights = {}
186
+ for name, shape in init_vars:
187
+ logger.info(f"Loading TF weight {name} with shape {shape}")
188
+ array = tf.train.load_variable(tf_checkpoint_path, name)
189
+ tf_weights[name] = array
190
+
191
+ # Build TF to PyTorch weights loading map
192
+ tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
193
+
194
+ for name, pointer in tf_to_pt_map.items():
195
+ logger.info(f"Importing {name}")
196
+ if name not in tf_weights:
197
+ logger.info(f"{name} not in tf pre-trained weights, skipping")
198
+ continue
199
+
200
+ array = tf_weights[name]
201
+
202
+ if "depthwise_weights" in name:
203
+ logger.info("Transposing depthwise")
204
+ array = np.transpose(array, (2, 3, 0, 1))
205
+ elif "weights" in name:
206
+ logger.info("Transposing")
207
+ if len(pointer.shape) == 2: # copying into linear layer
208
+ array = array.squeeze().transpose()
209
+ else:
210
+ array = np.transpose(array, (3, 2, 0, 1))
211
+
212
+ if pointer.shape != array.shape:
213
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
214
+
215
+ logger.info(f"Initialize PyTorch weight {name} {array.shape}")
216
+ pointer.data = torch.from_numpy(array)
217
+
218
+ tf_weights.pop(name, None)
219
+ tf_weights.pop(name + "/RMSProp", None)
220
+ tf_weights.pop(name + "/RMSProp_1", None)
221
+ tf_weights.pop(name + "/ExponentialMovingAverage", None)
222
+ tf_weights.pop(name + "/Momentum", None)
223
+
224
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
225
+ return model
226
+
227
+
228
+ def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
229
+ """
230
+ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
231
+ original TensorFlow repo. It can be seen here:
232
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
233
+ """
234
+ if min_value is None:
235
+ min_value = divisor
236
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
237
+ # Make sure that round down does not go down by more than 10%.
238
+ if new_value < 0.9 * value:
239
+ new_value += divisor
240
+ return int(new_value)
241
+
242
+
243
+ def apply_depth_multiplier(config: MobileNetV2Config, channels: int) -> int:
244
+ return make_divisible(int(round(channels * config.depth_multiplier)), config.depth_divisible_by, config.min_depth)
245
+
246
+
247
+ def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
248
+ """
249
+ Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at:
250
+ https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
251
+ """
252
+ in_height = int(features.shape[-2])
253
+ in_width = int(features.shape[-1])
254
+ stride_height, stride_width = conv_layer.stride
255
+ kernel_height, kernel_width = conv_layer.kernel_size
256
+ dilation_height, dilation_width = conv_layer.dilation
257
+
258
+ if in_height % stride_height == 0:
259
+ pad_along_height = max(kernel_height - stride_height, 0)
260
+ else:
261
+ pad_along_height = max(kernel_height - (in_height % stride_height), 0)
262
+
263
+ if in_width % stride_width == 0:
264
+ pad_along_width = max(kernel_width - stride_width, 0)
265
+ else:
266
+ pad_along_width = max(kernel_width - (in_width % stride_width), 0)
267
+
268
+ pad_left = pad_along_width // 2
269
+ pad_right = pad_along_width - pad_left
270
+ pad_top = pad_along_height // 2
271
+ pad_bottom = pad_along_height - pad_top
272
+
273
+ padding = (
274
+ pad_left * dilation_width,
275
+ pad_right * dilation_width,
276
+ pad_top * dilation_height,
277
+ pad_bottom * dilation_height,
278
+ )
279
+ return nn.functional.pad(features, padding, "constant", 0.0)
280
+
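# Hedged shape example (illustrative only): TensorFlow-style "SAME" padding for a 3x3, stride-2
# convolution on a 224x224 input pads only the bottom and right edges, matching tf.nn.conv2d.
def _example_tf_same_padding() -> None:
    conv = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=0)
    padded = apply_tf_padding(torch.randn(1, 3, 224, 224), conv)
    assert padded.shape[-2:] == (225, 225)  # padding == (left=0, right=1, top=0, bottom=1)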
281
+
282
+ class MobileNetV2ConvLayer(nn.Module):
283
+ def __init__(
284
+ self,
285
+ config: MobileNetV2Config,
286
+ in_channels: int,
287
+ out_channels: int,
288
+ kernel_size: int,
289
+ stride: int = 1,
290
+ groups: int = 1,
291
+ bias: bool = False,
292
+ dilation: int = 1,
293
+ use_normalization: bool = True,
294
+ use_activation: Union[bool, str] = True,
295
+ layer_norm_eps: Optional[float] = None,
296
+ ) -> None:
297
+ super().__init__()
298
+ self.config = config
299
+
300
+ if in_channels % groups != 0:
301
+ raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
302
+ if out_channels % groups != 0:
303
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
304
+
305
+ padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) * dilation
306
+
307
+ self.convolution = nn.Conv2d(
308
+ in_channels=in_channels,
309
+ out_channels=out_channels,
310
+ kernel_size=kernel_size,
311
+ stride=stride,
312
+ padding=padding,
313
+ dilation=dilation,
314
+ groups=groups,
315
+ bias=bias,
316
+ padding_mode="zeros",
317
+ )
318
+
319
+ if use_normalization:
320
+ self.normalization = nn.BatchNorm2d(
321
+ num_features=out_channels,
322
+ eps=config.layer_norm_eps if layer_norm_eps is None else layer_norm_eps,
323
+ momentum=0.997,
324
+ affine=True,
325
+ track_running_stats=True,
326
+ )
327
+ else:
328
+ self.normalization = None
329
+
330
+ if use_activation:
331
+ if isinstance(use_activation, str):
332
+ self.activation = ACT2FN[use_activation]
333
+ elif isinstance(config.hidden_act, str):
334
+ self.activation = ACT2FN[config.hidden_act]
335
+ else:
336
+ self.activation = config.hidden_act
337
+ else:
338
+ self.activation = None
339
+
340
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
341
+ if self.config.tf_padding:
342
+ features = apply_tf_padding(features, self.convolution)
343
+ features = self.convolution(features)
344
+ if self.normalization is not None:
345
+ features = self.normalization(features)
346
+ if self.activation is not None:
347
+ features = self.activation(features)
348
+ return features
349
+
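# Hedged usage sketch (relies on the MobileNetV2Config import at the top of this module): with the
# default tf_padding=True, a stride-2 3x3 MobileNetV2ConvLayer halves a 224x224 input to 112x112.
def _example_conv_layer_shapes() -> None:
    config = MobileNetV2Config()  # defaults include tf_padding=True and hidden_act="relu6"
    layer = MobileNetV2ConvLayer(config, in_channels=3, out_channels=32, kernel_size=3, stride=2)
    assert layer(torch.randn(1, 3, 224, 224)).shape == (1, 32, 112, 112)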
350
+
351
+ class MobileNetV2InvertedResidual(nn.Module):
352
+ def __init__(
353
+ self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
354
+ ) -> None:
355
+ super().__init__()
356
+
357
+ expanded_channels = make_divisible(
358
+ int(round(in_channels * config.expand_ratio)), config.depth_divisible_by, config.min_depth
359
+ )
360
+
361
+ if stride not in [1, 2]:
362
+ raise ValueError(f"Invalid stride {stride}.")
363
+
364
+ self.use_residual = (stride == 1) and (in_channels == out_channels)
365
+
366
+ self.expand_1x1 = MobileNetV2ConvLayer(
367
+ config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
368
+ )
369
+
370
+ self.conv_3x3 = MobileNetV2ConvLayer(
371
+ config,
372
+ in_channels=expanded_channels,
373
+ out_channels=expanded_channels,
374
+ kernel_size=3,
375
+ stride=stride,
376
+ groups=expanded_channels,
377
+ dilation=dilation,
378
+ )
379
+
380
+ self.reduce_1x1 = MobileNetV2ConvLayer(
381
+ config,
382
+ in_channels=expanded_channels,
383
+ out_channels=out_channels,
384
+ kernel_size=1,
385
+ use_activation=False,
386
+ )
387
+
388
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
389
+ residual = features
390
+
391
+ features = self.expand_1x1(features)
392
+ features = self.conv_3x3(features)
393
+ features = self.reduce_1x1(features)
394
+
395
+ return residual + features if self.use_residual else features
396
+
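# Hedged worked example (illustrative only): with the default expand_ratio of 6, a block taking 24
# input channels expands to 144 channels for the depthwise 3x3 stage and then projects back down;
# the residual shortcut is only active when stride == 1 and the channel counts match.
def _example_inverted_residual() -> None:
    block = MobileNetV2InvertedResidual(MobileNetV2Config(), in_channels=24, out_channels=24, stride=1)
    assert block.use_residual
    assert block.conv_3x3.convolution.groups == 144  # depthwise: one group per expanded channel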
397
+
398
+ class MobileNetV2Stem(nn.Module):
399
+ def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None:
400
+ super().__init__()
401
+
402
+ # The very first layer is a regular 3x3 convolution with stride 2 that expands to 32 channels.
403
+ # All other expansion layers use the expansion factor to compute the number of output channels.
404
+ self.first_conv = MobileNetV2ConvLayer(
405
+ config,
406
+ in_channels=in_channels,
407
+ out_channels=expanded_channels,
408
+ kernel_size=3,
409
+ stride=2,
410
+ )
411
+
412
+ if config.first_layer_is_expansion:
413
+ self.expand_1x1 = None
414
+ else:
415
+ self.expand_1x1 = MobileNetV2ConvLayer(
416
+ config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1
417
+ )
418
+
419
+ self.conv_3x3 = MobileNetV2ConvLayer(
420
+ config,
421
+ in_channels=expanded_channels,
422
+ out_channels=expanded_channels,
423
+ kernel_size=3,
424
+ stride=1,
425
+ groups=expanded_channels,
426
+ )
427
+
428
+ self.reduce_1x1 = MobileNetV2ConvLayer(
429
+ config,
430
+ in_channels=expanded_channels,
431
+ out_channels=out_channels,
432
+ kernel_size=1,
433
+ use_activation=False,
434
+ )
435
+
436
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
437
+ features = self.first_conv(features)
438
+ if self.expand_1x1 is not None:
439
+ features = self.expand_1x1(features)
440
+ features = self.conv_3x3(features)
441
+ features = self.reduce_1x1(features)
442
+ return features
443
+
444
+
445
+ class MobileNetV2PreTrainedModel(PreTrainedModel):
446
+ """
447
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
448
+ models.
449
+ """
450
+
451
+ config_class = MobileNetV2Config
452
+ load_tf_weights = load_tf_weights_in_mobilenet_v2
453
+ base_model_prefix = "mobilenet_v2"
454
+ main_input_name = "pixel_values"
455
+ supports_gradient_checkpointing = False
456
+
457
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
458
+ """Initialize the weights"""
459
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
460
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
461
+ if module.bias is not None:
462
+ module.bias.data.zero_()
463
+ elif isinstance(module, nn.BatchNorm2d):
464
+ module.bias.data.zero_()
465
+ module.weight.data.fill_(1.0)
466
+
467
+
468
+ MOBILENET_V2_START_DOCSTRING = r"""
469
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
470
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
471
+ behavior.
472
+
473
+ Parameters:
474
+ config ([`MobileNetV2Config`]): Model configuration class with all the parameters of the model.
475
+ Initializing with a config file does not load the weights associated with the model, only the
476
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
477
+ """
478
+
479
+ MOBILENET_V2_INPUTS_DOCSTRING = r"""
480
+ Args:
481
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
482
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
483
+ [`MobileNetV2ImageProcessor.__call__`] for details.
484
+ output_hidden_states (`bool`, *optional*):
485
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
486
+ more detail.
487
+ return_dict (`bool`, *optional*):
488
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
489
+ """
490
+
491
+
492
+ @add_start_docstrings(
493
+ "The bare MobileNetV2 model outputting raw hidden-states without any specific head on top.",
494
+ MOBILENET_V2_START_DOCSTRING,
495
+ )
496
+ class MobileNetV2Model(MobileNetV2PreTrainedModel):
497
+ def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool = True):
498
+ super().__init__(config)
499
+ self.config = config
500
+
501
+ # Output channels for the projection layers
502
+ channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320]
503
+ channels = [apply_depth_multiplier(config, x) for x in channels]
504
+
505
+ # Strides for the depthwise layers
506
+ strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
507
+
508
+ self.conv_stem = MobileNetV2Stem(
509
+ config,
510
+ in_channels=config.num_channels,
511
+ expanded_channels=apply_depth_multiplier(config, 32),
512
+ out_channels=channels[0],
513
+ )
514
+
515
+ current_stride = 2 # first conv layer has stride 2
516
+ dilation = 1
517
+
518
+ self.layer = nn.ModuleList()
519
+ for i in range(16):
520
+ # Keep shrinking the feature maps, or switch to dilated convolution once the target output stride is reached.
521
+ if current_stride == config.output_stride:
522
+ layer_stride = 1
523
+ layer_dilation = dilation
524
+ dilation *= strides[i] # larger dilation starts in next block
525
+ else:
526
+ layer_stride = strides[i]
527
+ layer_dilation = 1
528
+ current_stride *= layer_stride
529
+
530
+ self.layer.append(
531
+ MobileNetV2InvertedResidual(
532
+ config,
533
+ in_channels=channels[i],
534
+ out_channels=channels[i + 1],
535
+ stride=layer_stride,
536
+ dilation=layer_dilation,
537
+ )
538
+ )
539
+
540
+ if config.finegrained_output and config.depth_multiplier < 1.0:
541
+ output_channels = 1280
542
+ else:
543
+ output_channels = apply_depth_multiplier(config, 1280)
544
+
545
+ self.conv_1x1 = MobileNetV2ConvLayer(
546
+ config,
547
+ in_channels=channels[-1],
548
+ out_channels=output_channels,
549
+ kernel_size=1,
550
+ )
551
+
552
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
553
+
554
+ # Initialize weights and apply final processing
555
+ self.post_init()
556
+
557
+ def _prune_heads(self, heads_to_prune):
558
+ raise NotImplementedError
559
+
560
+ @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
561
+ @add_code_sample_docstrings(
562
+ checkpoint=_CHECKPOINT_FOR_DOC,
563
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
564
+ config_class=_CONFIG_FOR_DOC,
565
+ modality="vision",
566
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
567
+ )
568
+ def forward(
569
+ self,
570
+ pixel_values: Optional[torch.Tensor] = None,
571
+ output_hidden_states: Optional[bool] = None,
572
+ return_dict: Optional[bool] = None,
573
+ ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
574
+ output_hidden_states = (
575
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
576
+ )
577
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
578
+
579
+ if pixel_values is None:
580
+ raise ValueError("You have to specify pixel_values")
581
+
582
+ hidden_states = self.conv_stem(pixel_values)
583
+
584
+ all_hidden_states = () if output_hidden_states else None
585
+
586
+ for i, layer_module in enumerate(self.layer):
587
+ hidden_states = layer_module(hidden_states)
588
+
589
+ if output_hidden_states:
590
+ all_hidden_states = all_hidden_states + (hidden_states,)
591
+
592
+ last_hidden_state = self.conv_1x1(hidden_states)
593
+
594
+ if self.pooler is not None:
595
+ pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
596
+ else:
597
+ pooled_output = None
598
+
599
+ if not return_dict:
600
+ return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
601
+
602
+ return BaseModelOutputWithPoolingAndNoAttention(
603
+ last_hidden_state=last_hidden_state,
604
+ pooler_output=pooled_output,
605
+ hidden_states=all_hidden_states,
606
+ )
607
+
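# Hedged usage sketch (assumes the public "google/mobilenet_v2_1.0_224" checkpoint and follows the
# doc-example pattern used later in this file): for a 224x224 image and the default output_stride
# of 32, the backbone yields a (1, 1280, 7, 7) feature map and a (1, 1280) pooled vector.
#
#     from transformers import AutoImageProcessor, MobileNetV2Model
#
#     image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
#     model = MobileNetV2Model.from_pretrained("google/mobilenet_v2_1.0_224")
#     inputs = image_processor(images=image, return_tensors="pt")  # `image` is any PIL image
#     with torch.no_grad():
#         outputs = model(**inputs)
#     outputs.last_hidden_state.shape  # torch.Size([1, 1280, 7, 7])
#     outputs.pooler_output.shape  # torch.Size([1, 1280])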
608
+
609
+ @add_start_docstrings(
610
+ """
611
+ MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
612
+ ImageNet.
613
+ """,
614
+ MOBILENET_V2_START_DOCSTRING,
615
+ )
616
+ class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel):
617
+ def __init__(self, config: MobileNetV2Config) -> None:
618
+ super().__init__(config)
619
+
620
+ self.num_labels = config.num_labels
621
+ self.mobilenet_v2 = MobileNetV2Model(config)
622
+
623
+ last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels
624
+
625
+ # Classifier head
626
+ self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
627
+ self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
628
+
629
+ # Initialize weights and apply final processing
630
+ self.post_init()
631
+
632
+ @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
633
+ @add_code_sample_docstrings(
634
+ checkpoint=_CHECKPOINT_FOR_DOC,
635
+ output_type=ImageClassifierOutputWithNoAttention,
636
+ config_class=_CONFIG_FOR_DOC,
637
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
638
+ )
639
+ def forward(
640
+ self,
641
+ pixel_values: Optional[torch.Tensor] = None,
642
+ output_hidden_states: Optional[bool] = None,
643
+ labels: Optional[torch.Tensor] = None,
644
+ return_dict: Optional[bool] = None,
645
+ ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
646
+ r"""
647
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
648
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
649
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
650
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
651
+ """
652
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
653
+
654
+ outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
655
+
656
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
657
+
658
+ logits = self.classifier(self.dropout(pooled_output))
659
+
660
+ loss = None
661
+ if labels is not None:
662
+ if self.config.problem_type is None:
663
+ if self.num_labels == 1:
664
+ self.config.problem_type = "regression"
665
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
666
+ self.config.problem_type = "single_label_classification"
667
+ else:
668
+ self.config.problem_type = "multi_label_classification"
669
+
670
+ if self.config.problem_type == "regression":
671
+ loss_fct = MSELoss()
672
+ if self.num_labels == 1:
673
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
674
+ else:
675
+ loss = loss_fct(logits, labels)
676
+ elif self.config.problem_type == "single_label_classification":
677
+ loss_fct = CrossEntropyLoss()
678
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
679
+ elif self.config.problem_type == "multi_label_classification":
680
+ loss_fct = BCEWithLogitsLoss()
681
+ loss = loss_fct(logits, labels)
682
+
683
+ if not return_dict:
684
+ output = (logits,) + outputs[2:]
685
+ return ((loss,) + output) if loss is not None else output
686
+
687
+ return ImageClassifierOutputWithNoAttention(
688
+ loss=loss,
689
+ logits=logits,
690
+ hidden_states=outputs.hidden_states,
691
+ )
692
+
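# Hedged usage sketch (assumes the public "google/mobilenet_v2_1.0_224" ImageNet checkpoint):
#
#     from transformers import AutoImageProcessor, MobileNetV2ForImageClassification
#
#     image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
#     model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
#     inputs = image_processor(images=image, return_tensors="pt")  # `image` is any PIL image
#     with torch.no_grad():
#         logits = model(**inputs).logits  # shape (1, num_labels)
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]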
693
+
694
+ class MobileNetV2DeepLabV3Plus(nn.Module):
695
+ """
696
+ The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image
697
+ Segmentation" https://arxiv.org/abs/1802.02611
698
+ """
699
+
700
+ def __init__(self, config: MobileNetV2Config) -> None:
701
+ super().__init__()
702
+
703
+ self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
704
+
705
+ self.conv_pool = MobileNetV2ConvLayer(
706
+ config,
707
+ in_channels=apply_depth_multiplier(config, 320),
708
+ out_channels=256,
709
+ kernel_size=1,
710
+ stride=1,
711
+ use_normalization=True,
712
+ use_activation="relu",
713
+ layer_norm_eps=1e-5,
714
+ )
715
+
716
+ self.conv_aspp = MobileNetV2ConvLayer(
717
+ config,
718
+ in_channels=apply_depth_multiplier(config, 320),
719
+ out_channels=256,
720
+ kernel_size=1,
721
+ stride=1,
722
+ use_normalization=True,
723
+ use_activation="relu",
724
+ layer_norm_eps=1e-5,
725
+ )
726
+
727
+ self.conv_projection = MobileNetV2ConvLayer(
728
+ config,
729
+ in_channels=512,
730
+ out_channels=256,
731
+ kernel_size=1,
732
+ stride=1,
733
+ use_normalization=True,
734
+ use_activation="relu",
735
+ layer_norm_eps=1e-5,
736
+ )
737
+
738
+ self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
739
+
740
+ self.classifier = MobileNetV2ConvLayer(
741
+ config,
742
+ in_channels=256,
743
+ out_channels=config.num_labels,
744
+ kernel_size=1,
745
+ use_normalization=False,
746
+ use_activation=False,
747
+ bias=True,
748
+ )
749
+
750
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
751
+ spatial_size = features.shape[-2:]
752
+
753
+ features_pool = self.avg_pool(features)
754
+ features_pool = self.conv_pool(features_pool)
755
+ features_pool = nn.functional.interpolate(
756
+ features_pool, size=spatial_size, mode="bilinear", align_corners=True
757
+ )
758
+
759
+ features_aspp = self.conv_aspp(features)
760
+
761
+ features = torch.cat([features_pool, features_aspp], dim=1)
762
+
763
+ features = self.conv_projection(features)
764
+ features = self.dropout(features)
765
+ features = self.classifier(features)
766
+ return features
767
+
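# Channel walk-through of the head above (hedged, assuming depth_multiplier == 1.0): the incoming
# feature map has 320 channels; the image-level branch (avg pool -> 1x1 conv -> bilinear upsample)
# and the 1x1 "ASPP" branch each produce 256 channels, their concatenation has 512, the projection
# reduces it back to 256, and the final 1x1 classifier maps to config.num_labels channels at the
# same spatial resolution as the input features.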
768
+
769
+ @add_start_docstrings(
770
+ """
771
+ MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.
772
+ """,
773
+ MOBILENET_V2_START_DOCSTRING,
774
+ )
775
+ class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel):
776
+ def __init__(self, config: MobileNetV2Config) -> None:
777
+ super().__init__(config)
778
+
779
+ self.num_labels = config.num_labels
780
+ self.mobilenet_v2 = MobileNetV2Model(config, add_pooling_layer=False)
781
+ self.segmentation_head = MobileNetV2DeepLabV3Plus(config)
782
+
783
+ # Initialize weights and apply final processing
784
+ self.post_init()
785
+
786
+ @add_start_docstrings_to_model_forward(MOBILENET_V2_INPUTS_DOCSTRING)
787
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
788
+ def forward(
789
+ self,
790
+ pixel_values: Optional[torch.Tensor] = None,
791
+ labels: Optional[torch.Tensor] = None,
792
+ output_hidden_states: Optional[bool] = None,
793
+ return_dict: Optional[bool] = None,
794
+ ) -> Union[tuple, SemanticSegmenterOutput]:
795
+ r"""
796
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
797
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
798
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
799
+
800
+ Returns:
801
+
802
+ Examples:
803
+
804
+ ```python
805
+ >>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation
806
+ >>> from PIL import Image
807
+ >>> import requests
808
+
809
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
810
+ >>> image = Image.open(requests.get(url, stream=True).raw)
811
+
812
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
813
+ >>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
814
+
815
+ >>> inputs = image_processor(images=image, return_tensors="pt")
816
+
817
+ >>> with torch.no_grad():
818
+ ... outputs = model(**inputs)
819
+
820
+ >>> # logits are of shape (batch_size, num_labels, height, width)
821
+ >>> logits = outputs.logits
822
+ ```"""
823
+ output_hidden_states = (
824
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
825
+ )
826
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
827
+
828
+ outputs = self.mobilenet_v2(
829
+ pixel_values,
830
+ output_hidden_states=True, # we need the intermediate hidden states
831
+ return_dict=return_dict,
832
+ )
833
+
834
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
835
+
836
+ logits = self.segmentation_head(encoder_hidden_states[-1])
837
+
838
+ loss = None
839
+ if labels is not None:
840
+ if self.config.num_labels == 1:
841
+ raise ValueError("The number of labels should be greater than one")
842
+ else:
843
+ # upsample logits to the images' original size
844
+ upsampled_logits = nn.functional.interpolate(
845
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
846
+ )
847
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
848
+ loss = loss_fct(upsampled_logits, labels)
849
+
850
+ if not return_dict:
851
+ if output_hidden_states:
852
+ output = (logits,) + outputs[1:]
853
+ else:
854
+ output = (logits,) + outputs[2:]
855
+ return ((loss,) + output) if loss is not None else output
856
+
857
+ return SemanticSegmenterOutput(
858
+ loss=loss,
859
+ logits=logits,
860
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
861
+ attentions=None,
862
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]}
20
+
21
+ try:
22
+ if not is_torch_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["modeling_qdqbert"] = [
28
+ "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
29
+ "QDQBertForMaskedLM",
30
+ "QDQBertForMultipleChoice",
31
+ "QDQBertForNextSentencePrediction",
32
+ "QDQBertForQuestionAnswering",
33
+ "QDQBertForSequenceClassification",
34
+ "QDQBertForTokenClassification",
35
+ "QDQBertLayer",
36
+ "QDQBertLMHeadModel",
37
+ "QDQBertModel",
38
+ "QDQBertPreTrainedModel",
39
+ "load_tf_weights_in_qdqbert",
40
+ ]
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_qdqbert import (
53
+ QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ QDQBertForMaskedLM,
55
+ QDQBertForMultipleChoice,
56
+ QDQBertForNextSentencePrediction,
57
+ QDQBertForQuestionAnswering,
58
+ QDQBertForSequenceClassification,
59
+ QDQBertForTokenClassification,
60
+ QDQBertLayer,
61
+ QDQBertLMHeadModel,
62
+ QDQBertModel,
63
+ QDQBertPreTrainedModel,
64
+ load_tf_weights_in_qdqbert,
65
+ )
66
+
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
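# Hedged illustration (not part of the upstream file): with the _LazyModule pattern above, the
# torch-dependent submodule is only imported when one of its attributes is first accessed, e.g.
#
#     from transformers.models import qdqbert
#
#     config_cls = qdqbert.QDQBertConfig  # imports configuration_qdqbert on first access
#     model_cls = qdqbert.QDQBertModel  # imports modeling_qdqbert (requires torch) on first access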
llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc ADDED
Binary file (5.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py ADDED
@@ -0,0 +1,123 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ QDQBERT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class QDQBertConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate an
30
+ QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the BERT
32
+ [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 30522):
40
+ Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`QDQBertModel`].
42
+ hidden_size (`int`, *optional*, defaults to 768):
43
+ Dimension of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 12):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 3072):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 512):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ type_vocab_size (`int`, *optional*, defaults to 2):
61
+ The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`].
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
65
+ The epsilon used by the layer normalization layers.
66
+ is_decoder (`bool`, *optional*, defaults to `False`):
67
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
68
+ use_cache (`bool`, *optional*, defaults to `True`):
69
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
70
+ relevant if `config.is_decoder=True`.
71
+
72
+ Examples:
73
+
74
+ ```python
75
+ >>> from transformers import QDQBertModel, QDQBertConfig
76
+
77
+ >>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration
78
+ >>> configuration = QDQBertConfig()
79
+
80
+ >>> # Initializing a model from the google-bert/bert-base-uncased style configuration
81
+ >>> model = QDQBertModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "qdqbert"
88
+
89
+ def __init__(
90
+ self,
91
+ vocab_size=30522,
92
+ hidden_size=768,
93
+ num_hidden_layers=12,
94
+ num_attention_heads=12,
95
+ intermediate_size=3072,
96
+ hidden_act="gelu",
97
+ hidden_dropout_prob=0.1,
98
+ attention_probs_dropout_prob=0.1,
99
+ max_position_embeddings=512,
100
+ type_vocab_size=2,
101
+ initializer_range=0.02,
102
+ layer_norm_eps=1e-12,
103
+ use_cache=True,
104
+ pad_token_id=1,
105
+ bos_token_id=0,
106
+ eos_token_id=2,
107
+ **kwargs,
108
+ ):
109
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
110
+
111
+ self.vocab_size = vocab_size
112
+ self.max_position_embeddings = max_position_embeddings
113
+ self.hidden_size = hidden_size
114
+ self.num_hidden_layers = num_hidden_layers
115
+ self.num_attention_heads = num_attention_heads
116
+ self.intermediate_size = intermediate_size
117
+ self.hidden_act = hidden_act
118
+ self.hidden_dropout_prob = hidden_dropout_prob
119
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
120
+ self.initializer_range = initializer_range
121
+ self.type_vocab_size = type_vocab_size
122
+ self.layer_norm_eps = layer_norm_eps
123
+ self.use_cache = use_cache
llmeval-env/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py ADDED
@@ -0,0 +1,1737 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team.
3
+ # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch QDQBERT model."""
17
+
18
+
19
+ import math
20
+ import os
21
+ import warnings
22
+ from typing import Dict, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ CausalLMOutputWithCrossAttentions,
34
+ MaskedLMOutput,
35
+ MultipleChoiceModelOutput,
36
+ NextSentencePredictorOutput,
37
+ QuestionAnsweringModelOutput,
38
+ SequenceClassifierOutput,
39
+ TokenClassifierOutput,
40
+ )
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
43
+ from ...utils import (
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ is_pytorch_quantization_available,
48
+ logging,
49
+ replace_return_docstrings,
50
+ requires_backends,
51
+ )
52
+ from .configuration_qdqbert import QDQBertConfig
53
+
54
+
55
+ logger = logging.get_logger(__name__)
56
+
57
+ # soft dependency
58
+ if is_pytorch_quantization_available():
59
+ try:
60
+ from pytorch_quantization import nn as quant_nn
61
+ from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer
62
+ except OSError:
63
+ logger.error(
64
+ "QDQBERT model are not usable since `pytorch_quantization` can't be loaded. Please try to reinstall it"
65
+ " following the instructions here:"
66
+ " https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
67
+ )
68
+
69
+ _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased"
70
+ _CONFIG_FOR_DOC = "QDQBertConfig"
71
+
72
+
73
+ from ..deprecated._archive_maps import QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
74
+
75
+
76
+ def load_tf_weights_in_qdqbert(model, tf_checkpoint_path):
77
+ """Load tf checkpoints in a pytorch model."""
78
+ try:
79
+ import re
80
+
81
+ import numpy as np
82
+ import tensorflow as tf
83
+ except ImportError:
84
+ logger.error(
85
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
86
+ "https://www.tensorflow.org/install/ for installation instructions."
87
+ )
88
+ raise
89
+ tf_path = os.path.abspath(tf_checkpoint_path)
90
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
91
+ # Load weights from TF model
92
+ init_vars = tf.train.list_variables(tf_path)
93
+ names = []
94
+ arrays = []
95
+ for name, shape in init_vars:
96
+ logger.info(f"Loading TF weight {name} with shape {shape}")
97
+ array = tf.train.load_variable(tf_path, name)
98
+ names.append(name)
99
+ arrays.append(array)
100
+
101
+ for name, array in zip(names, arrays):
102
+ name = name.split("/")
103
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
104
+ # which are not required for using the pretrained model
105
+ if any(
106
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
107
+ for n in name
108
+ ):
109
+ logger.info(f"Skipping {'/'.join(name)}")
110
+ continue
111
+ pointer = model
112
+ for m_name in name:
113
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
114
+ scope_names = re.split(r"_(\d+)", m_name)
115
+ else:
116
+ scope_names = [m_name]
117
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
118
+ pointer = getattr(pointer, "weight")
119
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
120
+ pointer = getattr(pointer, "bias")
121
+ elif scope_names[0] == "output_weights":
122
+ pointer = getattr(pointer, "weight")
123
+ elif scope_names[0] == "squad":
124
+ pointer = getattr(pointer, "classifier")
125
+ else:
126
+ try:
127
+ pointer = getattr(pointer, scope_names[0])
128
+ except AttributeError:
129
+ logger.info(f"Skipping {'/'.join(name)}")
130
+ continue
131
+ if len(scope_names) >= 2:
132
+ num = int(scope_names[1])
133
+ pointer = pointer[num]
134
+ if m_name[-11:] == "_embeddings":
135
+ pointer = getattr(pointer, "weight")
136
+ elif m_name == "kernel":
137
+ array = np.transpose(array)
138
+ try:
139
+ if pointer.shape != array.shape:
140
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
141
+ except AssertionError as e:
142
+ e.args += (pointer.shape, array.shape)
143
+ raise
144
+ logger.info(f"Initialize PyTorch weight {name}")
145
+ pointer.data = torch.from_numpy(array)
146
+ return model
147
+
148
+
149
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert -> QDQBert
150
+ class QDQBertEmbeddings(nn.Module):
151
+ """Construct the embeddings from word, position and token_type embeddings."""
152
+
153
+ def __init__(self, config):
154
+ super().__init__()
155
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
156
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
157
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
158
+
159
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
160
+ # any TensorFlow checkpoint file
161
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
162
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
163
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
164
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
165
+ self.register_buffer(
166
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
167
+ )
168
+ self.register_buffer(
169
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
170
+ )
171
+
172
+ def forward(
173
+ self,
174
+ input_ids: Optional[torch.LongTensor] = None,
175
+ token_type_ids: Optional[torch.LongTensor] = None,
176
+ position_ids: Optional[torch.LongTensor] = None,
177
+ inputs_embeds: Optional[torch.FloatTensor] = None,
178
+ past_key_values_length: int = 0,
179
+ ) -> torch.Tensor:
180
+ if input_ids is not None:
181
+ input_shape = input_ids.size()
182
+ else:
183
+ input_shape = inputs_embeds.size()[:-1]
184
+
185
+ seq_length = input_shape[1]
186
+
187
+ if position_ids is None:
188
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
189
+
190
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
191
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
192
+ # issue #5664
193
+ if token_type_ids is None:
194
+ if hasattr(self, "token_type_ids"):
195
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
196
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
197
+ token_type_ids = buffered_token_type_ids_expanded
198
+ else:
199
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
200
+
201
+ if inputs_embeds is None:
202
+ inputs_embeds = self.word_embeddings(input_ids)
203
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
204
+
205
+ embeddings = inputs_embeds + token_type_embeddings
206
+ if self.position_embedding_type == "absolute":
207
+ position_embeddings = self.position_embeddings(position_ids)
208
+ embeddings += position_embeddings
209
+ embeddings = self.LayerNorm(embeddings)
210
+ embeddings = self.dropout(embeddings)
211
+ return embeddings
212
+
213
+
214
+ class QDQBertSelfAttention(nn.Module):
215
+ def __init__(self, config):
216
+ super().__init__()
217
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
218
+ raise ValueError(
219
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
220
+ f"heads ({config.num_attention_heads})"
221
+ )
222
+
223
+ self.num_attention_heads = config.num_attention_heads
224
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
225
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
226
+
227
+ self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
228
+ self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
229
+ self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size)
230
+
231
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
232
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
233
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
234
+ self.max_position_embeddings = config.max_position_embeddings
235
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
236
+
237
+ self.is_decoder = config.is_decoder
238
+
239
+ self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
240
+ self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
241
+ self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
242
+ self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
243
+
244
+ def transpose_for_scores(self, x):
245
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
246
+ x = x.view(*new_x_shape)
247
+ return x.permute(0, 2, 1, 3)
248
+
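# Hedged shape example (illustrative only): with hidden_size=768 and 12 attention heads,
# transpose_for_scores reshapes (batch, seq_len, 768) into (batch, 12, seq_len, 64) so the
# per-head attention matmuls below can be batched over heads.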
249
+ def forward(
250
+ self,
251
+ hidden_states,
252
+ attention_mask=None,
253
+ head_mask=None,
254
+ encoder_hidden_states=None,
255
+ encoder_attention_mask=None,
256
+ past_key_value=None,
257
+ output_attentions=False,
258
+ ):
259
+ mixed_query_layer = self.query(hidden_states)
260
+
261
+ # If this is instantiated as a cross-attention module, the keys
262
+ # and values come from an encoder; the attention mask needs to be
263
+ # such that the encoder's padding tokens are not attended to.
264
+ is_cross_attention = encoder_hidden_states is not None
265
+
266
+ if is_cross_attention and past_key_value is not None:
267
+ # reuse k,v, cross_attentions
268
+ key_layer = past_key_value[0]
269
+ value_layer = past_key_value[1]
270
+ attention_mask = encoder_attention_mask
271
+ elif is_cross_attention:
272
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
273
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
274
+ attention_mask = encoder_attention_mask
275
+ elif past_key_value is not None:
276
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
277
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
278
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
279
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
280
+ else:
281
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
282
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
283
+
284
+ query_layer = self.transpose_for_scores(mixed_query_layer)
285
+
286
+ if self.is_decoder:
287
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
288
+ # Further calls to cross_attention layer can then reuse all cross-attention
289
+ # key/value_states (first "if" case)
290
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
291
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
292
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
293
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
294
+ past_key_value = (key_layer, value_layer)
295
+
296
+ # Take the dot product between "query" and "key" to get the raw attention scores.
297
+ attention_scores = torch.matmul(
298
+ self.matmul_q_input_quantizer(query_layer), self.matmul_k_input_quantizer(key_layer.transpose(-1, -2))
299
+ )
300
+
301
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
302
+ seq_length = hidden_states.size()[1]
303
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
304
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
305
+ distance = position_ids_l - position_ids_r
306
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
307
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
308
+
309
+ if self.position_embedding_type == "relative_key":
310
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
311
+ attention_scores = attention_scores + relative_position_scores
312
+ elif self.position_embedding_type == "relative_key_query":
313
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
314
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
315
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
316
+
317
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
318
+ if attention_mask is not None:
319
+ # Apply the attention mask is (precomputed for all layers in QDQBertModel forward() function)
320
+ attention_scores = attention_scores + attention_mask
321
+
322
+ # Normalize the attention scores to probabilities.
323
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
324
+
325
+ # This is actually dropping out entire tokens to attend to, which might
326
+ # seem a bit unusual, but is taken from the original Transformer paper.
327
+ attention_probs = self.dropout(attention_probs)
328
+
329
+ # Mask heads if we want to
330
+ if head_mask is not None:
331
+ attention_probs = attention_probs * head_mask
332
+
333
+ context_layer = torch.matmul(
334
+ self.matmul_a_input_quantizer(attention_probs), self.matmul_v_input_quantizer(value_layer)
335
+ )
336
+
337
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
338
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
339
+ context_layer = context_layer.view(*new_context_layer_shape)
340
+
341
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
342
+
343
+ if self.is_decoder:
344
+ outputs = outputs + (past_key_value,)
345
+ return outputs
346
+
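# Hedged design note (illustrative only): the four TensorQuantizer instances above fake-quantize
# the inputs of both attention matmuls (query x key^T and attention_probs x value), so that, in
# addition to the QuantLinear projections, these matmuls carry the QuantizeLinear/DequantizeLinear
# pairs needed for quantization-aware training and INT8 export.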
347
+
348
+ class QDQBertSelfOutput(nn.Module):
349
+ def __init__(self, config):
350
+ super().__init__()
351
+ # Quantize Linear layer
352
+ self.dense = quant_nn.QuantLinear(config.hidden_size, config.hidden_size)
353
+
354
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
355
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
356
+
357
+ # Quantize the inputs to the residual add
358
+ self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
359
+ self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
360
+
361
+ def forward(self, hidden_states, input_tensor):
362
+ hidden_states = self.dense(hidden_states)
363
+ hidden_states = self.dropout(hidden_states)
364
+ # Quantize the inputs to the residual add
365
+ add_local = self.add_local_input_quantizer(hidden_states)
366
+ add_residual = self.add_residual_input_quantizer(input_tensor)
367
+ hidden_states = self.LayerNorm(add_local + add_residual)
368
+ return hidden_states
369
+
370
+
371
+ # Based on transformers.models.bert.modeling_bert.BertAttention with Bert -> QDQBert
372
+ class QDQBertAttention(nn.Module):
373
+ def __init__(self, config):
374
+ super().__init__()
375
+ self.self = QDQBertSelfAttention(config)
376
+ self.output = QDQBertSelfOutput(config)
377
+ self.pruned_heads = set()
378
+
379
+ def prune_heads(self, heads):
380
+ if len(heads) == 0:
381
+ return
382
+ heads, index = find_pruneable_heads_and_indices(
383
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
384
+ )
385
+
386
+ # Prune linear layers
387
+ self.self.query = prune_linear_layer(self.self.query, index)
388
+ self.self.key = prune_linear_layer(self.self.key, index)
389
+ self.self.value = prune_linear_layer(self.self.value, index)
390
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
391
+
392
+ # Update hyper params and store pruned heads
393
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
394
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
395
+ self.pruned_heads = self.pruned_heads.union(heads)
396
+
397
+ def forward(
398
+ self,
399
+ hidden_states,
400
+ attention_mask=None,
401
+ head_mask=None,
402
+ encoder_hidden_states=None,
403
+ encoder_attention_mask=None,
404
+ past_key_value=None,
405
+ output_attentions=False,
406
+ ):
407
+ self_outputs = self.self(
408
+ hidden_states,
409
+ attention_mask,
410
+ head_mask,
411
+ encoder_hidden_states,
412
+ encoder_attention_mask,
413
+ past_key_value,
414
+ output_attentions,
415
+ )
416
+ attention_output = self.output(self_outputs[0], hidden_states)
417
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
418
+ return outputs
419
+
420
+
421
+ class QDQBertIntermediate(nn.Module):
422
+ def __init__(self, config):
423
+ super().__init__()
424
+ # Quantize Linear layer
425
+ self.dense = quant_nn.QuantLinear(config.hidden_size, config.intermediate_size)
426
+ if isinstance(config.hidden_act, str):
427
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
428
+ else:
429
+ self.intermediate_act_fn = config.hidden_act
430
+
431
+ def forward(self, hidden_states):
432
+ hidden_states = self.dense(hidden_states)
433
+ hidden_states = self.intermediate_act_fn(hidden_states)
434
+ return hidden_states
435
+
436
+
437
+ class QDQBertOutput(nn.Module):
438
+ def __init__(self, config):
439
+ super().__init__()
440
+ # Quantize Linear layer
441
+ self.dense = quant_nn.QuantLinear(config.intermediate_size, config.hidden_size)
442
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
443
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
444
+
445
+ # Quantize the inputs to the residual add
446
+ self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
447
+ self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input)
448
+
449
+ def forward(self, hidden_states, input_tensor):
450
+ hidden_states = self.dense(hidden_states)
451
+ hidden_states = self.dropout(hidden_states)
452
+ # Quantize the inputs to the residual add
453
+ add_local = self.add_local_input_quantizer(hidden_states)
454
+ add_residual = self.add_residual_input_quantizer(input_tensor)
455
+ hidden_states = self.LayerNorm(add_local + add_residual)
456
+ return hidden_states
457
+
458
+
459
+ # Based on transformers.models.bert.modeling_bert.BertLayer with Bert -> QDQBert
460
+ class QDQBertLayer(nn.Module):
461
+ def __init__(self, config):
462
+ super().__init__()
463
+ self.seq_len_dim = 1
464
+ self.attention = QDQBertAttention(config)
465
+ self.is_decoder = config.is_decoder
466
+ self.add_cross_attention = config.add_cross_attention
467
+ if self.add_cross_attention:
468
+ if not self.is_decoder:
469
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
470
+ self.crossattention = QDQBertAttention(config)
471
+ self.intermediate = QDQBertIntermediate(config)
472
+ self.output = QDQBertOutput(config)
473
+
474
+ def forward(
475
+ self,
476
+ hidden_states,
477
+ attention_mask=None,
478
+ head_mask=None,
479
+ encoder_hidden_states=None,
480
+ encoder_attention_mask=None,
481
+ past_key_value=None,
482
+ output_attentions=False,
483
+ ):
484
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
485
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
486
+ self_attention_outputs = self.attention(
487
+ hidden_states,
488
+ attention_mask,
489
+ head_mask,
490
+ output_attentions=output_attentions,
491
+ past_key_value=self_attn_past_key_value,
492
+ )
493
+ attention_output = self_attention_outputs[0]
494
+
495
+ # if decoder, the last output is tuple of self-attn cache
496
+ if self.is_decoder:
497
+ outputs = self_attention_outputs[1:-1]
498
+ present_key_value = self_attention_outputs[-1]
499
+ else:
500
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
501
+
502
+ cross_attn_present_key_value = None
503
+ if self.is_decoder and encoder_hidden_states is not None:
504
+ if not hasattr(self, "crossattention"):
505
+ raise ValueError(
506
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
507
+ " by setting `config.add_cross_attention=True`"
508
+ )
509
+
510
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
511
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
512
+ cross_attention_outputs = self.crossattention(
513
+ attention_output,
514
+ attention_mask,
515
+ head_mask,
516
+ encoder_hidden_states,
517
+ encoder_attention_mask,
518
+ cross_attn_past_key_value,
519
+ output_attentions,
520
+ )
521
+ attention_output = cross_attention_outputs[0]
522
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
523
+
524
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
525
+ cross_attn_present_key_value = cross_attention_outputs[-1]
526
+ present_key_value = present_key_value + cross_attn_present_key_value
527
+
528
+ layer_output = self.feed_forward_chunk(attention_output)
529
+ outputs = (layer_output,) + outputs
530
+
531
+ # if decoder, return the attn key/values as the last output
532
+ if self.is_decoder:
533
+ outputs = outputs + (present_key_value,)
534
+
535
+ return outputs
536
+
537
+ def feed_forward_chunk(self, attention_output):
538
+ intermediate_output = self.intermediate(attention_output)
539
+ layer_output = self.output(intermediate_output, attention_output)
540
+ return layer_output
541
+
542
+
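# --- Illustrative sketch (editor's addition) ---
# Dummy-tensor illustration of the per-layer cache layout used by QDQBertLayer.forward
# above: self-attention key/value come first, cross-attention key/value last.
import torch

_k_self = torch.zeros(1, 2, 5, 8)   # (batch, heads, past_seq_len, head_dim)
_v_self = torch.zeros(1, 2, 5, 8)
_k_cross = torch.zeros(1, 2, 7, 8)
_v_cross = torch.zeros(1, 2, 7, 8)
_past_key_value = (_k_self, _v_self, _k_cross, _v_cross)

_self_attn_past = _past_key_value[:2]    # consumed by self.attention
_cross_attn_past = _past_key_value[-2:]  # consumed by self.crossattention
assert len(_self_attn_past) == 2 and len(_cross_attn_past) == 2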
543
+ # Based on transformers.models.bert.modeling_bert.BertEncoder with Bert -> QDQBert
544
+ class QDQBertEncoder(nn.Module):
545
+ def __init__(self, config):
546
+ super().__init__()
547
+ self.config = config
548
+ self.layer = nn.ModuleList([QDQBertLayer(config) for _ in range(config.num_hidden_layers)])
549
+ self.gradient_checkpointing = False
550
+
551
+ def forward(
552
+ self,
553
+ hidden_states,
554
+ attention_mask=None,
555
+ head_mask=None,
556
+ encoder_hidden_states=None,
557
+ encoder_attention_mask=None,
558
+ past_key_values=None,
559
+ use_cache=None,
560
+ output_attentions=False,
561
+ output_hidden_states=False,
562
+ return_dict=True,
563
+ ):
564
+ all_hidden_states = () if output_hidden_states else None
565
+ all_self_attentions = () if output_attentions else None
566
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
567
+
568
+ next_decoder_cache = () if use_cache else None
569
+ for i, layer_module in enumerate(self.layer):
570
+ if output_hidden_states:
571
+ all_hidden_states = all_hidden_states + (hidden_states,)
572
+
573
+ layer_head_mask = head_mask[i] if head_mask is not None else None
574
+ past_key_value = past_key_values[i] if past_key_values is not None else None
575
+
576
+ if self.gradient_checkpointing and self.training:
577
+ if use_cache:
578
+ logger.warning_once(
579
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
580
+ )
581
+ use_cache = False
582
+ layer_outputs = self._gradient_checkpointing_func(
583
+ layer_module.__call__,
584
+ hidden_states,
585
+ attention_mask,
586
+ layer_head_mask,
587
+ encoder_hidden_states,
588
+ encoder_attention_mask,
589
+ past_key_value,
590
+ output_attentions,
591
+ )
592
+ else:
593
+ layer_outputs = layer_module(
594
+ hidden_states,
595
+ attention_mask,
596
+ layer_head_mask,
597
+ encoder_hidden_states,
598
+ encoder_attention_mask,
599
+ past_key_value,
600
+ output_attentions,
601
+ )
602
+
603
+ hidden_states = layer_outputs[0]
604
+ if use_cache:
605
+ next_decoder_cache += (layer_outputs[-1],)
606
+ if output_attentions:
607
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
608
+ if self.config.add_cross_attention:
609
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
610
+
611
+ if output_hidden_states:
612
+ all_hidden_states = all_hidden_states + (hidden_states,)
613
+
614
+ if not return_dict:
615
+ return tuple(
616
+ v
617
+ for v in [
618
+ hidden_states,
619
+ next_decoder_cache,
620
+ all_hidden_states,
621
+ all_self_attentions,
622
+ all_cross_attentions,
623
+ ]
624
+ if v is not None
625
+ )
626
+ return BaseModelOutputWithPastAndCrossAttentions(
627
+ last_hidden_state=hidden_states,
628
+ past_key_values=next_decoder_cache,
629
+ hidden_states=all_hidden_states,
630
+ attentions=all_self_attentions,
631
+ cross_attentions=all_cross_attentions,
632
+ )
633
+
634
+
635
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> QDQBert
636
+ class QDQBertPooler(nn.Module):
637
+ def __init__(self, config):
638
+ super().__init__()
639
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
640
+ self.activation = nn.Tanh()
641
+
642
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
643
+ # We "pool" the model by simply taking the hidden state corresponding
644
+ # to the first token.
645
+ first_token_tensor = hidden_states[:, 0]
646
+ pooled_output = self.dense(first_token_tensor)
647
+ pooled_output = self.activation(pooled_output)
648
+ return pooled_output
649
+
650
+
651
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert -> QDQBert
652
+ class QDQBertPredictionHeadTransform(nn.Module):
653
+ def __init__(self, config):
654
+ super().__init__()
655
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
656
+ if isinstance(config.hidden_act, str):
657
+ self.transform_act_fn = ACT2FN[config.hidden_act]
658
+ else:
659
+ self.transform_act_fn = config.hidden_act
660
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
661
+
662
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
663
+ hidden_states = self.dense(hidden_states)
664
+ hidden_states = self.transform_act_fn(hidden_states)
665
+ hidden_states = self.LayerNorm(hidden_states)
666
+ return hidden_states
667
+
668
+
669
+ # Based on transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert -> QDQBert
670
+ class QDQBertLMPredictionHead(nn.Module):
671
+ def __init__(self, config):
672
+ super().__init__()
673
+ self.transform = QDQBertPredictionHeadTransform(config)
674
+
675
+ # The output weights are the same as the input embeddings, but there is
676
+ # an output-only bias for each token.
677
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
678
+
679
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
680
+
681
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
682
+ self.decoder.bias = self.bias
683
+
684
+ def forward(self, hidden_states):
685
+ hidden_states = self.transform(hidden_states)
686
+ hidden_states = self.decoder(hidden_states)
687
+ return hidden_states
688
+
689
+
690
+ # Based on transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert -> QDQBert
691
+ class QDQBertOnlyMLMHead(nn.Module):
692
+ def __init__(self, config):
693
+ super().__init__()
694
+ self.predictions = QDQBertLMPredictionHead(config)
695
+
696
+ def forward(self, sequence_output):
697
+ prediction_scores = self.predictions(sequence_output)
698
+ return prediction_scores
699
+
700
+
701
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert -> QDQBert
702
+ class QDQBertOnlyNSPHead(nn.Module):
703
+ def __init__(self, config):
704
+ super().__init__()
705
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
706
+
707
+ def forward(self, pooled_output):
708
+ seq_relationship_score = self.seq_relationship(pooled_output)
709
+ return seq_relationship_score
710
+
711
+
712
+ # Based on transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert -> QDQBert
713
+ class QDQBertPreTrainingHeads(nn.Module):
714
+ def __init__(self, config):
715
+ super().__init__()
716
+ self.predictions = QDQBertLMPredictionHead(config)
717
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
718
+
719
+ def forward(self, sequence_output, pooled_output):
720
+ prediction_scores = self.predictions(sequence_output)
721
+ seq_relationship_score = self.seq_relationship(pooled_output)
722
+ return prediction_scores, seq_relationship_score
723
+
724
+
725
+ # Based on transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert -> QDQBert
726
+ class QDQBertPreTrainedModel(PreTrainedModel):
727
+ """
728
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
729
+ models.
730
+ """
731
+
732
+ config_class = QDQBertConfig
733
+ load_tf_weights = load_tf_weights_in_qdqbert
734
+ base_model_prefix = "bert"
735
+ supports_gradient_checkpointing = True
736
+
737
+ def _init_weights(self, module):
738
+ """Initialize the weights"""
739
+ if isinstance(module, nn.Linear):
740
+ # Slightly different from the TF version which uses truncated_normal for initialization
741
+ # cf https://github.com/pytorch/pytorch/pull/5617
742
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
743
+ if module.bias is not None:
744
+ module.bias.data.zero_()
745
+ elif isinstance(module, nn.Embedding):
746
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
747
+ if module.padding_idx is not None:
748
+ module.weight.data[module.padding_idx].zero_()
749
+ elif isinstance(module, nn.LayerNorm):
750
+ module.bias.data.zero_()
751
+ module.weight.data.fill_(1.0)
752
+
753
+
754
+ QDQBERT_START_DOCSTRING = r"""
755
+
756
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
757
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
758
+ etc.)
759
+
760
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
761
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
762
+ and behavior.
763
+
764
+ Parameters:
765
+ config ([`QDQBertConfig`]): Model configuration class with all the parameters of the model.
766
+ Initializing with a config file does not load the weights associated with the model, only the
767
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
768
+ """
769
+
770
+ QDQBERT_INPUTS_DOCSTRING = r"""
771
+ Args:
772
+ input_ids (`torch.LongTensor` of shape `({0})`):
773
+ Indices of input sequence tokens in the vocabulary.
774
+
775
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
776
+ [`PreTrainedTokenizer.__call__`] for details.
777
+
778
+ [What are input IDs?](../glossary#input-ids)
779
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
780
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
781
+
782
+ - 1 for tokens that are **not masked**,
783
+ - 0 for tokens that are **masked**.
784
+
785
+ [What are attention masks?](../glossary#attention-mask)
786
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
787
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
788
+ 1]`:
789
+
790
+ - 0 corresponds to a *sentence A* token,
791
+ - 1 corresponds to a *sentence B* token.
792
+
793
+ [What are token type IDs?](../glossary#token-type-ids)
794
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
795
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
796
+ config.max_position_embeddings - 1]`.
797
+
798
+ [What are position IDs?](../glossary#position-ids)
799
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
800
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
801
+
802
+ - 1 indicates the head is **not masked**,
803
+ - 0 indicates the head is **masked**.
804
+
805
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
806
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
807
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
808
+ model's internal embedding lookup matrix.
809
+ output_attentions (`bool`, *optional*):
810
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
811
+ tensors for more detail.
812
+ output_hidden_states (`bool`, *optional*):
813
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
814
+ more detail.
815
+ return_dict (`bool`, *optional*):
816
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
817
+ """
818
+
819
+
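# --- Illustrative sketch (editor's addition) ---
# A hedged example of how the inputs documented above (input_ids, attention_mask,
# token_type_ids) are typically produced; it assumes the same "google-bert/bert-base-cased"
# checkpoint that the doc examples further below use.
from transformers import AutoTokenizer

_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
_encoding = _tokenizer("Sentence A", "Sentence B", return_tensors="pt")
# input_ids / attention_mask / token_type_ids all have shape (batch_size, sequence_length).
print({name: tensor.shape for name, tensor in _encoding.items()})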
820
+ @add_start_docstrings(
821
+ "The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.",
822
+ QDQBERT_START_DOCSTRING,
823
+ )
824
+ class QDQBertModel(QDQBertPreTrainedModel):
825
+ """
826
+
827
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
828
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
829
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
830
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
831
+
832
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
833
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
834
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
835
+ """
836
+
837
+ def __init__(self, config, add_pooling_layer: bool = True):
838
+ requires_backends(self, "pytorch_quantization")
839
+ super().__init__(config)
840
+ self.config = config
841
+
842
+ self.embeddings = QDQBertEmbeddings(config)
843
+ self.encoder = QDQBertEncoder(config)
844
+
845
+ self.pooler = QDQBertPooler(config) if add_pooling_layer else None
846
+
847
+ # Initialize weights and apply final processing
848
+ self.post_init()
849
+
850
+ def get_input_embeddings(self):
851
+ return self.embeddings.word_embeddings
852
+
853
+ def set_input_embeddings(self, value):
854
+ self.embeddings.word_embeddings = value
855
+
856
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]):
857
+ """
858
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
859
+ class PreTrainedModel
860
+ """
861
+ for layer, heads in heads_to_prune.items():
862
+ self.encoder.layer[layer].attention.prune_heads(heads)
863
+
864
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
865
+ @add_code_sample_docstrings(
866
+ checkpoint=_CHECKPOINT_FOR_DOC,
867
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
868
+ config_class=_CONFIG_FOR_DOC,
869
+ )
870
+ def forward(
871
+ self,
872
+ input_ids: Optional[torch.LongTensor] = None,
873
+ attention_mask: Optional[torch.FloatTensor] = None,
874
+ token_type_ids: Optional[torch.LongTensor] = None,
875
+ position_ids: Optional[torch.LongTensor] = None,
876
+ head_mask: Optional[torch.FloatTensor] = None,
877
+ inputs_embeds: Optional[torch.FloatTensor] = None,
878
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
879
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
880
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
881
+ use_cache: Optional[bool] = None,
882
+ output_attentions: Optional[bool] = None,
883
+ output_hidden_states: Optional[bool] = None,
884
+ return_dict: Optional[bool] = None,
885
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
886
+ r"""
887
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
888
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
889
+ the model is configured as a decoder.
890
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
891
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
892
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
893
+
894
+ - 1 for tokens that are **not masked**,
895
+ - 0 for tokens that are **masked**.
896
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
897
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
898
+
899
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
900
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
901
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
902
+ use_cache (`bool`, *optional*):
903
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
904
+ `past_key_values`).
905
+ """
906
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
907
+ output_hidden_states = (
908
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
909
+ )
910
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
911
+
912
+ if self.config.is_decoder:
913
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
914
+ else:
915
+ use_cache = False
916
+
917
+ if input_ids is not None and inputs_embeds is not None:
918
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
919
+ elif input_ids is not None:
920
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
921
+ input_shape = input_ids.size()
922
+ batch_size, seq_length = input_shape
923
+ elif inputs_embeds is not None:
924
+ input_shape = inputs_embeds.size()[:-1]
925
+ batch_size, seq_length = input_shape
926
+ else:
927
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
928
+
929
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
930
+
931
+ # past_key_values_length
932
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
933
+
934
+ if attention_mask is None:
935
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
936
+
937
+ if token_type_ids is None:
938
+ if hasattr(self.embeddings, "token_type_ids"):
939
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
940
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
941
+ token_type_ids = buffered_token_type_ids_expanded
942
+ else:
943
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
944
+
945
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
946
+ # ourselves in which case we just need to make it broadcastable to all heads.
947
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
948
+
949
+ # If a 2D or 3D attention mask is provided for the cross-attention
950
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
951
+ if self.config.is_decoder and encoder_hidden_states is not None:
952
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
953
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
954
+ if encoder_attention_mask is None:
955
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
956
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
957
+ else:
958
+ encoder_extended_attention_mask = None
959
+
960
+ # Prepare head mask if needed
961
+ # 1.0 in head_mask indicate we keep the head
962
+ # attention_probs has shape bsz x n_heads x N x N
963
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
964
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
965
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
966
+
967
+ embedding_output = self.embeddings(
968
+ input_ids=input_ids,
969
+ position_ids=position_ids,
970
+ token_type_ids=token_type_ids,
971
+ inputs_embeds=inputs_embeds,
972
+ past_key_values_length=past_key_values_length,
973
+ )
974
+ encoder_outputs = self.encoder(
975
+ embedding_output,
976
+ attention_mask=extended_attention_mask,
977
+ head_mask=head_mask,
978
+ encoder_hidden_states=encoder_hidden_states,
979
+ encoder_attention_mask=encoder_extended_attention_mask,
980
+ past_key_values=past_key_values,
981
+ use_cache=use_cache,
982
+ output_attentions=output_attentions,
983
+ output_hidden_states=output_hidden_states,
984
+ return_dict=return_dict,
985
+ )
986
+ sequence_output = encoder_outputs[0]
987
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
988
+
989
+ if not return_dict:
990
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
991
+
992
+ return BaseModelOutputWithPoolingAndCrossAttentions(
993
+ last_hidden_state=sequence_output,
994
+ pooler_output=pooled_output,
995
+ past_key_values=encoder_outputs.past_key_values,
996
+ hidden_states=encoder_outputs.hidden_states,
997
+ attentions=encoder_outputs.attentions,
998
+ cross_attentions=encoder_outputs.cross_attentions,
999
+ )
1000
+
1001
+
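# --- Illustrative sketch (editor's addition) ---
# Minimal sketch of the decoder usage described in the QDQBertModel docstring above
# (is_decoder / add_cross_attention). It assumes the pytorch_quantization backend that
# QDQBertModel requires is installed; the tiny config values are arbitrary.
import torch
from transformers import QDQBertConfig, QDQBertModel

_config = QDQBertConfig(
    hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64,
    is_decoder=True, add_cross_attention=True,
)
_model = QDQBertModel(_config)
_input_ids = torch.randint(0, _config.vocab_size, (1, 6))
_encoder_states = torch.randn(1, 10, _config.hidden_size)  # stand-in encoder output
_outputs = _model(_input_ids, encoder_hidden_states=_encoder_states)
print(_outputs.last_hidden_state.shape)  # torch.Size([1, 6, 32])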
1002
+ @add_start_docstrings(
1003
+ """QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.""", QDQBERT_START_DOCSTRING
1004
+ )
1005
+ class QDQBertLMHeadModel(QDQBertPreTrainedModel):
1006
+ _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"]
1007
+
1008
+ def __init__(self, config):
1009
+ super().__init__(config)
1010
+
1011
+ if not config.is_decoder:
1012
+ logger.warning("If you want to use `QDQBertLMHeadModel` as a standalone, add `is_decoder=True`.")
1013
+
1014
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1015
+ self.cls = QDQBertOnlyMLMHead(config)
1016
+
1017
+ # Initialize weights and apply final processing
1018
+ self.post_init()
1019
+
1020
+ def get_output_embeddings(self):
1021
+ return self.cls.predictions.decoder
1022
+
1023
+ def set_output_embeddings(self, new_embeddings):
1024
+ self.cls.predictions.decoder = new_embeddings
1025
+
1026
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1027
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
1028
+ def forward(
1029
+ self,
1030
+ input_ids: Optional[torch.LongTensor] = None,
1031
+ attention_mask: Optional[torch.Tensor] = None,
1032
+ token_type_ids: Optional[torch.LongTensor] = None,
1033
+ position_ids: Optional[torch.LongTensor] = None,
1034
+ head_mask: Optional[torch.Tensor] = None,
1035
+ inputs_embeds: Optional[torch.Tensor] = None,
1036
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1037
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1038
+ labels: Optional[torch.LongTensor] = None,
1039
+ past_key_values: Optional[Tuple[Tuple[torch.LongTensor]]] = None,
1040
+ use_cache: Optional[bool] = None,
1041
+ output_attentions: Optional[bool] = None,
1042
+ output_hidden_states: Optional[bool] = None,
1043
+ return_dict: Optional[bool] = None,
1044
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1045
+ r"""
1046
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1047
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1048
+ the model is configured as a decoder.
1049
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
1050
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1051
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1052
+
1053
+ - 1 for tokens that are **not masked**,
1054
+ - 0 for tokens that are **masked**.
1055
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1056
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
1057
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
1058
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1059
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
1060
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1061
+
1062
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1063
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1064
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1065
+ use_cache (`bool`, *optional*):
1066
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1067
+ `past_key_values`).
1068
+
1069
+ Returns:
1070
+
1071
+ Example:
1072
+
1073
+ ```python
1074
+ >>> from transformers import AutoTokenizer, QDQBertLMHeadModel, QDQBertConfig
1075
+ >>> import torch
1076
+
1077
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
1078
+ >>> config = QDQBertConfig.from_pretrained("google-bert/bert-base-cased")
1079
+ >>> config.is_decoder = True
1080
+ >>> model = QDQBertLMHeadModel.from_pretrained("google-bert/bert-base-cased", config=config)
1081
+
1082
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1083
+ >>> outputs = model(**inputs)
1084
+
1085
+ >>> prediction_logits = outputs.logits
1086
+ ```"""
1087
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1088
+ if labels is not None:
1089
+ use_cache = False
1090
+
1091
+ outputs = self.bert(
1092
+ input_ids,
1093
+ attention_mask=attention_mask,
1094
+ token_type_ids=token_type_ids,
1095
+ position_ids=position_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ encoder_hidden_states=encoder_hidden_states,
1099
+ encoder_attention_mask=encoder_attention_mask,
1100
+ past_key_values=past_key_values,
1101
+ use_cache=use_cache,
1102
+ output_attentions=output_attentions,
1103
+ output_hidden_states=output_hidden_states,
1104
+ return_dict=return_dict,
1105
+ )
1106
+
1107
+ sequence_output = outputs[0]
1108
+ prediction_scores = self.cls(sequence_output)
1109
+
1110
+ lm_loss = None
1111
+ if labels is not None:
1112
+ # we are doing next-token prediction; shift prediction scores and input ids by one
1113
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
1114
+ labels = labels[:, 1:].contiguous()
1115
+ loss_fct = CrossEntropyLoss()
1116
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1117
+
1118
+ if not return_dict:
1119
+ output = (prediction_scores,) + outputs[2:]
1120
+ return ((lm_loss,) + output) if lm_loss is not None else output
1121
+
1122
+ return CausalLMOutputWithCrossAttentions(
1123
+ loss=lm_loss,
1124
+ logits=prediction_scores,
1125
+ past_key_values=outputs.past_key_values,
1126
+ hidden_states=outputs.hidden_states,
1127
+ attentions=outputs.attentions,
1128
+ cross_attentions=outputs.cross_attentions,
1129
+ )
1130
+
1131
+ def prepare_inputs_for_generation(
1132
+ self,
1133
+ input_ids: Optional[torch.LongTensor],
1134
+ past_key_values=None,
1135
+ attention_mask: Optional[torch.Tensor] = None,
1136
+ **model_kwargs,
1137
+ ):
1138
+ input_shape = input_ids.shape
1139
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
1140
+ if attention_mask is None:
1141
+ attention_mask = input_ids.new_ones(input_shape)
1142
+
1143
+ # cut decoder_input_ids if past_key_values is used
1144
+ if past_key_values is not None:
1145
+ past_length = past_key_values[0][0].shape[2]
1146
+
1147
+ # Some generation methods already pass only the last input ID
1148
+ if input_ids.shape[1] > past_length:
1149
+ remove_prefix_length = past_length
1150
+ else:
1151
+ # Default to old behavior: keep only final ID
1152
+ remove_prefix_length = input_ids.shape[1] - 1
1153
+
1154
+ input_ids = input_ids[:, remove_prefix_length:]
1155
+
1156
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
1157
+
1158
+ def _reorder_cache(self, past_key_values, beam_idx):
1159
+ reordered_past = ()
1160
+ for layer_past in past_key_values:
1161
+ reordered_past += (
1162
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1163
+ )
1164
+ return reordered_past
1165
+
1166
+
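# --- Illustrative sketch (editor's addition) ---
# Self-contained illustration of the shift-by-one causal LM loss computed in
# QDQBertLMHeadModel.forward above: the score at position t is matched against the
# label at position t + 1.
import torch
from torch.nn import CrossEntropyLoss

_vocab_size = 11
_scores = torch.randn(2, 5, _vocab_size)               # (batch, seq_len, vocab)
_labels = torch.randint(0, _vocab_size, (2, 5))        # typically the input ids themselves

_shifted_scores = _scores[:, :-1, :].contiguous()      # drop the last position
_shifted_labels = _labels[:, 1:].contiguous()          # drop the first token
_lm_loss = CrossEntropyLoss()(_shifted_scores.view(-1, _vocab_size), _shifted_labels.view(-1))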
1167
+ @add_start_docstrings("""QDQBERT Model with a `language modeling` head on top.""", QDQBERT_START_DOCSTRING)
1168
+ class QDQBertForMaskedLM(QDQBertPreTrainedModel):
1169
+ _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"]
1170
+
1171
+ def __init__(self, config):
1172
+ super().__init__(config)
1173
+
1174
+ if config.is_decoder:
1175
+ logger.warning(
1176
+ "If you want to use `QDQBertForMaskedLM` make sure `config.is_decoder=False` for "
1177
+ "bi-directional self-attention."
1178
+ )
1179
+
1180
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1181
+ self.cls = QDQBertOnlyMLMHead(config)
1182
+
1183
+ # Initialize weights and apply final processing
1184
+ self.post_init()
1185
+
1186
+ def get_output_embeddings(self):
1187
+ return self.cls.predictions.decoder
1188
+
1189
+ def set_output_embeddings(self, new_embeddings):
1190
+ self.cls.predictions.decoder = new_embeddings
1191
+
1192
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1193
+ @add_code_sample_docstrings(
1194
+ checkpoint=_CHECKPOINT_FOR_DOC,
1195
+ output_type=MaskedLMOutput,
1196
+ config_class=_CONFIG_FOR_DOC,
1197
+ )
1198
+ def forward(
1199
+ self,
1200
+ input_ids: Optional[torch.LongTensor] = None,
1201
+ attention_mask: Optional[torch.FloatTensor] = None,
1202
+ token_type_ids: Optional[torch.LongTensor] = None,
1203
+ position_ids: Optional[torch.LongTensor] = None,
1204
+ head_mask: Optional[torch.FloatTensor] = None,
1205
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1206
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1207
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1208
+ labels: Optional[torch.LongTensor] = None,
1209
+ output_attentions: Optional[bool] = None,
1210
+ output_hidden_states: Optional[bool] = None,
1211
+ return_dict: Optional[bool] = None,
1212
+ ) -> Union[Tuple, MaskedLMOutput]:
1213
+ r"""
1214
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1215
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1216
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1217
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1218
+ """
1219
+
1220
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1221
+
1222
+ outputs = self.bert(
1223
+ input_ids,
1224
+ attention_mask=attention_mask,
1225
+ token_type_ids=token_type_ids,
1226
+ position_ids=position_ids,
1227
+ head_mask=head_mask,
1228
+ inputs_embeds=inputs_embeds,
1229
+ encoder_hidden_states=encoder_hidden_states,
1230
+ encoder_attention_mask=encoder_attention_mask,
1231
+ output_attentions=output_attentions,
1232
+ output_hidden_states=output_hidden_states,
1233
+ return_dict=return_dict,
1234
+ )
1235
+
1236
+ sequence_output = outputs[0]
1237
+ prediction_scores = self.cls(sequence_output)
1238
+
1239
+ masked_lm_loss = None
1240
+ if labels is not None:
1241
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1242
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1243
+
1244
+ if not return_dict:
1245
+ output = (prediction_scores,) + outputs[2:]
1246
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1247
+
1248
+ return MaskedLMOutput(
1249
+ loss=masked_lm_loss,
1250
+ logits=prediction_scores,
1251
+ hidden_states=outputs.hidden_states,
1252
+ attentions=outputs.attentions,
1253
+ )
1254
+
1255
+ def prepare_inputs_for_generation(
1256
+ self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor] = None, **model_kwargs
1257
+ ):
1258
+ input_shape = input_ids.shape
1259
+ effective_batch_size = input_shape[0]
1260
+
1261
+ # add a dummy token
1262
+ if self.config.pad_token_id is None:
1263
+ raise ValueError("The PAD token should be defined for generation")
1264
+
1265
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1266
+ dummy_token = torch.full(
1267
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1268
+ )
1269
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1270
+
1271
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1272
+
1273
+
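# --- Illustrative sketch (editor's addition) ---
# Dummy-tensor illustration of what prepare_inputs_for_generation above does for masked LM
# generation: append one PAD token to input_ids and a matching 0 to the attention mask.
# The token id values here are arbitrary.
import torch

_pad_token_id = 0
_input_ids = torch.tensor([[101, 7592, 102]])
_attention_mask = torch.ones_like(_input_ids)

_attention_mask = torch.cat(
    [_attention_mask, _attention_mask.new_zeros((_attention_mask.shape[0], 1))], dim=-1
)
_dummy = torch.full((_input_ids.shape[0], 1), _pad_token_id, dtype=torch.long)
_input_ids = torch.cat([_input_ids, _dummy], dim=1)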
1274
+ @add_start_docstrings(
1275
+ """QDQBERT Model with a `next sentence prediction (classification)` head on top.""",
1276
+ QDQBERT_START_DOCSTRING,
1277
+ )
1278
+ class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel):
1279
+ def __init__(self, config):
1280
+ super().__init__(config)
1281
+
1282
+ self.bert = QDQBertModel(config)
1283
+ self.cls = QDQBertOnlyNSPHead(config)
1284
+
1285
+ # Initialize weights and apply final processing
1286
+ self.post_init()
1287
+
1288
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1289
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1290
+ def forward(
1291
+ self,
1292
+ input_ids: Optional[torch.LongTensor] = None,
1293
+ attention_mask: Optional[torch.FloatTensor] = None,
1294
+ token_type_ids: Optional[torch.LongTensor] = None,
1295
+ position_ids: Optional[torch.LongTensor] = None,
1296
+ head_mask: Optional[torch.FloatTensor] = None,
1297
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1298
+ labels: Optional[torch.LongTensor] = None,
1299
+ output_attentions: Optional[bool] = None,
1300
+ output_hidden_states: Optional[bool] = None,
1301
+ return_dict: Optional[bool] = None,
1302
+ **kwargs,
1303
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
1304
+ r"""
1305
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1306
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1307
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1308
+
1309
+ - 0 indicates sequence B is a continuation of sequence A,
1310
+ - 1 indicates sequence B is a random sequence.
1311
+
1312
+ Returns:
1313
+
1314
+ Example:
1315
+
1316
+ ```python
1317
+ >>> from transformers import AutoTokenizer, QDQBertForNextSentencePrediction
1318
+ >>> import torch
1319
+
1320
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
1321
+ >>> model = QDQBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")
1322
+
1323
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1324
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1325
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1326
+
1327
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1328
+ >>> logits = outputs.logits
1329
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1330
+ ```"""
1331
+
1332
+ if "next_sentence_label" in kwargs:
1333
+ warnings.warn(
1334
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1335
+ " `labels` instead.",
1336
+ FutureWarning,
1337
+ )
1338
+ labels = kwargs.pop("next_sentence_label")
1339
+
1340
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1341
+
1342
+ outputs = self.bert(
1343
+ input_ids,
1344
+ attention_mask=attention_mask,
1345
+ token_type_ids=token_type_ids,
1346
+ position_ids=position_ids,
1347
+ head_mask=head_mask,
1348
+ inputs_embeds=inputs_embeds,
1349
+ output_attentions=output_attentions,
1350
+ output_hidden_states=output_hidden_states,
1351
+ return_dict=return_dict,
1352
+ )
1353
+
1354
+ pooled_output = outputs[1]
1355
+
1356
+ seq_relationship_scores = self.cls(pooled_output)
1357
+
1358
+ next_sentence_loss = None
1359
+ if labels is not None:
1360
+ loss_fct = CrossEntropyLoss()
1361
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1362
+
1363
+ if not return_dict:
1364
+ output = (seq_relationship_scores,) + outputs[2:]
1365
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1366
+
1367
+ return NextSentencePredictorOutput(
1368
+ loss=next_sentence_loss,
1369
+ logits=seq_relationship_scores,
1370
+ hidden_states=outputs.hidden_states,
1371
+ attentions=outputs.attentions,
1372
+ )
1373
+
1374
+
1375
+ @add_start_docstrings(
1376
+ """
1377
+ QDQBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1378
+ output) e.g. for GLUE tasks.
1379
+ """,
1380
+ QDQBERT_START_DOCSTRING,
1381
+ )
1382
+ class QDQBertForSequenceClassification(QDQBertPreTrainedModel):
1383
+ def __init__(self, config):
1384
+ super().__init__(config)
1385
+ self.num_labels = config.num_labels
1386
+ self.config = config
1387
+
1388
+ self.bert = QDQBertModel(config)
1389
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1390
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1391
+ # Initialize weights and apply final processing
1392
+ self.post_init()
1393
+
1394
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1395
+ @add_code_sample_docstrings(
1396
+ checkpoint=_CHECKPOINT_FOR_DOC,
1397
+ output_type=SequenceClassifierOutput,
1398
+ config_class=_CONFIG_FOR_DOC,
1399
+ )
1400
+ def forward(
1401
+ self,
1402
+ input_ids: Optional[torch.LongTensor] = None,
1403
+ attention_mask: Optional[torch.FloatTensor] = None,
1404
+ token_type_ids: Optional[torch.LongTensor] = None,
1405
+ position_ids: Optional[torch.LongTensor] = None,
1406
+ head_mask: Optional[torch.FloatTensor] = None,
1407
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1408
+ labels: Optional[torch.LongTensor] = None,
1409
+ output_attentions: Optional[bool] = None,
1410
+ output_hidden_states: Optional[bool] = None,
1411
+ return_dict: Optional[bool] = None,
1412
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1413
+ r"""
1414
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1415
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1416
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1417
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1418
+ """
1419
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1420
+
1421
+ outputs = self.bert(
1422
+ input_ids,
1423
+ attention_mask=attention_mask,
1424
+ token_type_ids=token_type_ids,
1425
+ position_ids=position_ids,
1426
+ head_mask=head_mask,
1427
+ inputs_embeds=inputs_embeds,
1428
+ output_attentions=output_attentions,
1429
+ output_hidden_states=output_hidden_states,
1430
+ return_dict=return_dict,
1431
+ )
1432
+
1433
+ pooled_output = outputs[1]
1434
+
1435
+ pooled_output = self.dropout(pooled_output)
1436
+ logits = self.classifier(pooled_output)
1437
+
1438
+ loss = None
1439
+ if labels is not None:
1440
+ if self.config.problem_type is None:
1441
+ if self.num_labels == 1:
1442
+ self.config.problem_type = "regression"
1443
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1444
+ self.config.problem_type = "single_label_classification"
1445
+ else:
1446
+ self.config.problem_type = "multi_label_classification"
1447
+
1448
+ if self.config.problem_type == "regression":
1449
+ loss_fct = MSELoss()
1450
+ if self.num_labels == 1:
1451
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1452
+ else:
1453
+ loss = loss_fct(logits, labels)
1454
+ elif self.config.problem_type == "single_label_classification":
1455
+ loss_fct = CrossEntropyLoss()
1456
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1457
+ elif self.config.problem_type == "multi_label_classification":
1458
+ loss_fct = BCEWithLogitsLoss()
1459
+ loss = loss_fct(logits, labels)
1460
+ if not return_dict:
1461
+ output = (logits,) + outputs[2:]
1462
+ return ((loss,) + output) if loss is not None else output
1463
+
1464
+ return SequenceClassifierOutput(
1465
+ loss=loss,
1466
+ logits=logits,
1467
+ hidden_states=outputs.hidden_states,
1468
+ attentions=outputs.attentions,
1469
+ )
1470
+
1471
+
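# --- Illustrative sketch (editor's addition) ---
# Compact illustration of the problem_type dispatch in
# QDQBertForSequenceClassification.forward above, with dummy logits and labels.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

_num_labels = 3
_logits = torch.randn(4, _num_labels)

# single_label_classification: integer class indices
_cls_labels = torch.randint(0, _num_labels, (4,))
_loss_single = CrossEntropyLoss()(_logits.view(-1, _num_labels), _cls_labels.view(-1))

# multi_label_classification: {0, 1} float targets per label
_multi_labels = torch.randint(0, 2, (4, _num_labels)).float()
_loss_multi = BCEWithLogitsLoss()(_logits, _multi_labels)

# regression (num_labels == 1): MSE on squeezed logits
_reg_logits, _reg_labels = torch.randn(4, 1), torch.randn(4)
_loss_reg = MSELoss()(_reg_logits.squeeze(), _reg_labels.squeeze())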
1472
+ @add_start_docstrings(
1473
+ """
1474
+ QDQBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1475
+ softmax) e.g. for RocStories/SWAG tasks.
1476
+ """,
1477
+ QDQBERT_START_DOCSTRING,
1478
+ )
1479
+ class QDQBertForMultipleChoice(QDQBertPreTrainedModel):
1480
+ def __init__(self, config):
1481
+ super().__init__(config)
1482
+
1483
+ self.bert = QDQBertModel(config)
1484
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1485
+ self.classifier = nn.Linear(config.hidden_size, 1)
1486
+
1487
+ # Initialize weights and apply final processing
1488
+ self.post_init()
1489
+
1490
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1491
+ @add_code_sample_docstrings(
1492
+ checkpoint=_CHECKPOINT_FOR_DOC,
1493
+ output_type=MultipleChoiceModelOutput,
1494
+ config_class=_CONFIG_FOR_DOC,
1495
+ )
1496
+ def forward(
1497
+ self,
1498
+ input_ids: Optional[torch.LongTensor] = None,
1499
+ attention_mask: Optional[torch.FloatTensor] = None,
1500
+ token_type_ids: Optional[torch.LongTensor] = None,
1501
+ position_ids: Optional[torch.LongTensor] = None,
1502
+ head_mask: Optional[torch.FloatTensor] = None,
1503
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1504
+ labels: Optional[torch.LongTensor] = None,
1505
+ output_attentions: Optional[bool] = None,
1506
+ output_hidden_states: Optional[bool] = None,
1507
+ return_dict: Optional[bool] = None,
1508
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1509
+ r"""
1510
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1511
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1512
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1513
+ `input_ids` above)
1514
+ """
1515
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1516
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1517
+
1518
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1519
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1520
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1521
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1522
+ inputs_embeds = (
1523
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1524
+ if inputs_embeds is not None
1525
+ else None
1526
+ )
1527
+
1528
+ outputs = self.bert(
1529
+ input_ids,
1530
+ attention_mask=attention_mask,
1531
+ token_type_ids=token_type_ids,
1532
+ position_ids=position_ids,
1533
+ head_mask=head_mask,
1534
+ inputs_embeds=inputs_embeds,
1535
+ output_attentions=output_attentions,
1536
+ output_hidden_states=output_hidden_states,
1537
+ return_dict=return_dict,
1538
+ )
1539
+
1540
+ pooled_output = outputs[1]
1541
+
1542
+ pooled_output = self.dropout(pooled_output)
1543
+ logits = self.classifier(pooled_output)
1544
+ reshaped_logits = logits.view(-1, num_choices)
1545
+
1546
+ loss = None
1547
+ if labels is not None:
1548
+ loss_fct = CrossEntropyLoss()
1549
+ loss = loss_fct(reshaped_logits, labels)
1550
+
1551
+ if not return_dict:
1552
+ output = (reshaped_logits,) + outputs[2:]
1553
+ return ((loss,) + output) if loss is not None else output
1554
+
1555
+ return MultipleChoiceModelOutput(
1556
+ loss=loss,
1557
+ logits=reshaped_logits,
1558
+ hidden_states=outputs.hidden_states,
1559
+ attentions=outputs.attentions,
1560
+ )
1561
+
1562
+
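# --- Illustrative sketch (editor's addition) ---
# Dummy-tensor illustration of the flatten -> classify -> reshape pattern used by
# QDQBertForMultipleChoice.forward above.
import torch

_batch, _choices, _seq_len, _hidden = 2, 4, 7, 16
_input_ids = torch.randint(0, 100, (_batch, _choices, _seq_len))
_flat_input_ids = _input_ids.view(-1, _input_ids.size(-1))        # (batch * choices, seq_len)

_pooled = torch.randn(_batch * _choices, _hidden)                 # stand-in pooler output
_logits = torch.nn.Linear(_hidden, 1)(_pooled)                    # one score per choice
_reshaped_logits = _logits.view(-1, _choices)                     # (batch, num_choices)
assert _reshaped_logits.shape == (_batch, _choices)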
1563
+ @add_start_docstrings(
1564
+ """
1565
+ QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1566
+ Named-Entity-Recognition (NER) tasks.
1567
+ """,
1568
+ QDQBERT_START_DOCSTRING,
1569
+ )
1570
+ class QDQBertForTokenClassification(QDQBertPreTrainedModel):
1571
+ def __init__(self, config):
1572
+ super().__init__(config)
1573
+ self.num_labels = config.num_labels
1574
+
1575
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1576
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1577
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1578
+
1579
+ # Initialize weights and apply final processing
1580
+ self.post_init()
1581
+
1582
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1583
+ @add_code_sample_docstrings(
1584
+ checkpoint=_CHECKPOINT_FOR_DOC,
1585
+ output_type=TokenClassifierOutput,
1586
+ config_class=_CONFIG_FOR_DOC,
1587
+ )
1588
+ def forward(
1589
+ self,
1590
+ input_ids: Optional[torch.LongTensor] = None,
1591
+ attention_mask: Optional[torch.FloatTensor] = None,
1592
+ token_type_ids: Optional[torch.LongTensor] = None,
1593
+ position_ids: Optional[torch.LongTensor] = None,
1594
+ head_mask: Optional[torch.FloatTensor] = None,
1595
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1596
+ labels: Optional[torch.LongTensor] = None,
1597
+ output_attentions: Optional[bool] = None,
1598
+ output_hidden_states: Optional[bool] = None,
1599
+ return_dict: Optional[bool] = None,
1600
+ ) -> Union[Tuple, TokenClassifierOutput]:
1601
+ r"""
1602
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1603
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1604
+ """
1605
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1606
+
1607
+ outputs = self.bert(
1608
+ input_ids,
1609
+ attention_mask=attention_mask,
1610
+ token_type_ids=token_type_ids,
1611
+ position_ids=position_ids,
1612
+ head_mask=head_mask,
1613
+ inputs_embeds=inputs_embeds,
1614
+ output_attentions=output_attentions,
1615
+ output_hidden_states=output_hidden_states,
1616
+ return_dict=return_dict,
1617
+ )
1618
+
1619
+ sequence_output = outputs[0]
1620
+
1621
+ sequence_output = self.dropout(sequence_output)
1622
+ logits = self.classifier(sequence_output)
1623
+
1624
+ loss = None
1625
+ if labels is not None:
1626
+ loss_fct = CrossEntropyLoss()
1627
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1628
+
1629
+ if not return_dict:
1630
+ output = (logits,) + outputs[2:]
1631
+ return ((loss,) + output) if loss is not None else output
1632
+
1633
+ return TokenClassifierOutput(
1634
+ loss=loss,
1635
+ logits=logits,
1636
+ hidden_states=outputs.hidden_states,
1637
+ attentions=outputs.attentions,
1638
+ )
1639
+
1640
+
1641
+ @add_start_docstrings(
1642
+ """
1643
+ QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1644
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1645
+ """,
1646
+ QDQBERT_START_DOCSTRING,
1647
+ )
1648
+ class QDQBertForQuestionAnswering(QDQBertPreTrainedModel):
1649
+ def __init__(self, config):
1650
+ super().__init__(config)
1651
+ self.num_labels = config.num_labels
1652
+
1653
+ self.bert = QDQBertModel(config, add_pooling_layer=False)
1654
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1655
+
1656
+ # Initialize weights and apply final processing
1657
+ self.post_init()
1658
+
1659
+ @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1660
+ @add_code_sample_docstrings(
1661
+ checkpoint=_CHECKPOINT_FOR_DOC,
1662
+ output_type=QuestionAnsweringModelOutput,
1663
+ config_class=_CONFIG_FOR_DOC,
1664
+ )
1665
+ def forward(
1666
+ self,
1667
+ input_ids: Optional[torch.LongTensor] = None,
1668
+ attention_mask: Optional[torch.FloatTensor] = None,
1669
+ token_type_ids: Optional[torch.LongTensor] = None,
1670
+ position_ids: Optional[torch.LongTensor] = None,
1671
+ head_mask: Optional[torch.FloatTensor] = None,
1672
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1673
+ start_positions: Optional[torch.LongTensor] = None,
1674
+ end_positions: Optional[torch.LongTensor] = None,
1675
+ output_attentions: Optional[bool] = None,
1676
+ output_hidden_states: Optional[bool] = None,
1677
+ return_dict: Optional[bool] = None,
1678
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1679
+ r"""
1680
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1681
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1682
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1683
+ are not taken into account for computing the loss.
1684
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1685
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1686
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1687
+ are not taken into account for computing the loss.
1688
+ """
1689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1690
+
1691
+ outputs = self.bert(
1692
+ input_ids,
1693
+ attention_mask=attention_mask,
1694
+ token_type_ids=token_type_ids,
1695
+ position_ids=position_ids,
1696
+ head_mask=head_mask,
1697
+ inputs_embeds=inputs_embeds,
1698
+ output_attentions=output_attentions,
1699
+ output_hidden_states=output_hidden_states,
1700
+ return_dict=return_dict,
1701
+ )
1702
+
1703
+ sequence_output = outputs[0]
1704
+
1705
+ logits = self.qa_outputs(sequence_output)
1706
+ start_logits, end_logits = logits.split(1, dim=-1)
1707
+ start_logits = start_logits.squeeze(-1).contiguous()
1708
+ end_logits = end_logits.squeeze(-1).contiguous()
1709
+
1710
+ total_loss = None
1711
+ if start_positions is not None and end_positions is not None:
1712
+ # If we are on multi-GPU, splitting can add an extra dimension; squeeze it
1713
+ if len(start_positions.size()) > 1:
1714
+ start_positions = start_positions.squeeze(-1)
1715
+ if len(end_positions.size()) > 1:
1716
+ end_positions = end_positions.squeeze(-1)
1717
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1718
+ ignored_index = start_logits.size(1)
1719
+ start_positions = start_positions.clamp(0, ignored_index)
1720
+ end_positions = end_positions.clamp(0, ignored_index)
1721
+
1722
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1723
+ start_loss = loss_fct(start_logits, start_positions)
1724
+ end_loss = loss_fct(end_logits, end_positions)
1725
+ total_loss = (start_loss + end_loss) / 2
1726
+
1727
+ if not return_dict:
1728
+ output = (start_logits, end_logits) + outputs[2:]
1729
+ return ((total_loss,) + output) if total_loss is not None else output
1730
+
1731
+ return QuestionAnsweringModelOutput(
1732
+ loss=total_loss,
1733
+ start_logits=start_logits,
1734
+ end_logits=end_logits,
1735
+ hidden_states=outputs.hidden_states,
1736
+ attentions=outputs.attentions,
1737
+ )
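# --- Illustrative sketch (editor's addition) ---
# Small illustration of the span head in QDQBertForQuestionAnswering.forward above: a single
# linear layer yields two logits per token, split into start/end scores, and out-of-range
# label positions are clamped to an ignored index.
import torch
from torch.nn import CrossEntropyLoss

_seq_len, _hidden = 9, 16
_sequence_output = torch.randn(1, _seq_len, _hidden)
_logits = torch.nn.Linear(_hidden, 2)(_sequence_output)
_start_logits, _end_logits = _logits.split(1, dim=-1)
_start_logits = _start_logits.squeeze(-1)
_end_logits = _end_logits.squeeze(-1)

_start_positions = torch.tensor([3]).clamp(0, _seq_len)
_end_positions = torch.tensor([5]).clamp(0, _seq_len)
_loss_fct = CrossEntropyLoss(ignore_index=_seq_len)
_total_loss = (_loss_fct(_start_logits, _start_positions) + _loss_fct(_end_logits, _end_positions)) / 2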
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_rag": ["RagConfig"],
22
+ "retrieval_rag": ["RagRetriever"],
23
+ "tokenization_rag": ["RagTokenizer"],
24
+ }
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_rag"] = [
33
+ "RagModel",
34
+ "RagPreTrainedModel",
35
+ "RagSequenceForGeneration",
36
+ "RagTokenForGeneration",
37
+ ]
38
+
39
+ try:
40
+ if not is_tf_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_tf_rag"] = [
46
+ "TFRagModel",
47
+ "TFRagPreTrainedModel",
48
+ "TFRagSequenceForGeneration",
49
+ "TFRagTokenForGeneration",
50
+ ]
51
+
52
+
53
+ if TYPE_CHECKING:
54
+ from .configuration_rag import RagConfig
55
+ from .retrieval_rag import RagRetriever
56
+ from .tokenization_rag import RagTokenizer
57
+
58
+ try:
59
+ if not is_torch_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
65
+
66
+ try:
67
+ if not is_tf_available():
68
+ raise OptionalDependencyNotAvailable()
69
+ except OptionalDependencyNotAvailable:
70
+ pass
71
+ else:
72
+ from .modeling_tf_rag import (
73
+ TFRagModel,
74
+ TFRagPreTrainedModel,
75
+ TFRagSequenceForGeneration,
76
+ TFRagTokenForGeneration,
77
+ )
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
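# --- Illustrative sketch (editor's addition) ---
# A hedged illustration of what the _LazyModule pattern above gives the caller: importing a
# lightweight member such as RagTokenizer does not import the torch- or TF-backed modeling
# submodules; those are only loaded when e.g. RagModel is first accessed (and then require
# their optional backend to be installed).
import sys

from transformers.models.rag import RagTokenizer  # resolved lazily through _LazyModule

print("RagTokenizer loaded:", RagTokenizer.__name__)
print("modeling_rag imported?", "transformers.models.rag.modeling_rag" in sys.modules)  # expected: False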
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc ADDED
Binary file (6.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc ADDED
Binary file (63.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc ADDED
Binary file (64.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc ADDED
Binary file (3.68 kB). View file