applied-ai-018 committed on
Commit 7ac701f · verified · 1 Parent(s): 3cc28fd

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py +63 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py +245 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/__init__.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf2_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_pytorch_checkpoint_to_original_tf.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_bert.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_flax_bert.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_tf_bert.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_fast.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_tf.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py +112 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py +187 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert.py +500 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__init__.py +130 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py +160 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py +57 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py +1337 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py +1468 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py +503 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py +172 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/image_processing_mask2former.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/__init__.py +44 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py +229 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/tokenization_mluke.py +1614 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__init__.py +140 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/configuration_pegasus.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/convert_pegasus_tf_to_pytorch.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_flax_pegasus.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_pegasus.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_tf_pegasus.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/tokenization_pegasus.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/tokenization_pegasus_fast.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/configuration_pegasus.py +164 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py +131 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.11 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py ADDED
@@ -0,0 +1,63 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
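For orientation, the `_LazyModule` registration above means importing the package is cheap: the torch-heavy `modeling_autoformer` module is only loaded when one of its attributes is first accessed. A minimal usage sketch, assuming `transformers` and `torch` are installed (the `prediction_length` value is only illustrative):

```python
# Sketch only: exercises the lazy __init__ above from the user side.
from transformers.models.autoformer import AutoformerConfig  # light import (config only)

config = AutoformerConfig(prediction_length=24)  # forecast horizon of 24 steps

# Touching a modeling symbol is what actually triggers the import of modeling_autoformer.
from transformers.models.autoformer import AutoformerModel

model = AutoformerModel(config)
print(type(model).__name__)  # AutoformerModel
```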
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (959 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc ADDED
Binary file (79.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py ADDED
@@ -0,0 +1,245 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Autoformer model configuration"""

from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402


class AutoformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
    Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Autoformer
    [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        prediction_length (`int`):
            The prediction length for the decoder. In other words, the prediction horizon of the model.
        context_length (`int`, *optional*, defaults to `prediction_length`):
            The context length for the encoder. If unset, the context length will be the same as the
            `prediction_length`.
        distribution_output (`string`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
        loss (`string`, *optional*, defaults to `"nll"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood (nll) - which currently is the only supported one.
        input_size (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
            The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
            5, 6, 7]`.
        scaling (`bool`, *optional*, defaults to `True`):
            Whether to scale the input targets.
        num_time_features (`int`, *optional*, defaults to 0):
            The number of time features in the input time series.
        num_dynamic_real_features (`int`, *optional*, defaults to 0):
            The number of dynamic real valued features.
        num_static_categorical_features (`int`, *optional*, defaults to 0):
            The number of static categorical features.
        num_static_real_features (`int`, *optional*, defaults to 0):
            The number of static real valued features.
        cardinality (`list[int]`, *optional*):
            The cardinality (number of different values) for each of the static categorical features. Should be a list
            of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        embedding_dimension (`list[int]`, *optional*):
            The dimension of the embedding for each of the static categorical features. Should be a list of integers,
            having the same length as `num_static_categorical_features`. Cannot be `None` if
            `num_static_categorical_features` is > 0.
        d_model (`int`, *optional*, defaults to 64):
            Dimensionality of the transformer layers.
        encoder_layers (`int`, *optional*, defaults to 2):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 2):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 2):
            Number of attention heads for each attention layer in the Transformer decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 32):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
            `"relu"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the encoder, and decoder.
        encoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each encoder layer.
        decoder_layerdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention and fully connected layers for each decoder layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability used between the two layers of the feed-forward networks.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples to generate in parallel for each time step of inference.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
        label_length (`int`, *optional*, defaults to 10):
            Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
            non-autoregressive generation).
        moving_average (`int`, defaults to 25):
            The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
            Layer.
        autocorrelation_factor (`int`, defaults to 3):
            "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
            It's recommended in the paper to set it to a number between 1 and 5.


    Example:

    ```python
    >>> from transformers import AutoformerConfig, AutoformerModel

    >>> # Initializing a default Autoformer configuration
    >>> configuration = AutoformerConfig()

    >>> # Randomly initializing a model (with random weights) from the configuration
    >>> model = AutoformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
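To make the `feature_size` bookkeeping in `__init__` and `_number_of_features` concrete, here is a small sketch; the argument values are made up for illustration, and exact defaults may differ across library versions:

```python
# Illustration of feature_size = input_size * len(lags_sequence) + _number_of_features.
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,
    num_time_features=2,                 # e.g. month-of-year and "age" features
    num_static_categorical_features=1,
    cardinality=[366],                   # one categorical feature with 366 possible values
)
# embedding_dimension defaults to [min(50, (366 + 1) // 2)] == [50]
# _number_of_features = 50 + 0 + 2 + 0 + 2 * 1 = 54
# feature_size        = 1 * 7 (default lags) + 54 = 61
print(config.embedding_dimension, config.feature_size)  # [50] 61
```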
llmeval-env/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.93 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/configuration_bert.cpython-310.pyc ADDED
Binary file (6.58 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf2_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (5.61 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_pytorch_checkpoint_to_original_tf.cpython-310.pyc ADDED
Binary file (3.74 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.86 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_bert.cpython-310.pyc ADDED
Binary file (54.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_flax_bert.cpython-310.pyc ADDED
Binary file (42.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/modeling_tf_bert.cpython-310.pyc ADDED
Binary file (61.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert.cpython-310.pyc ADDED
Binary file (17 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_fast.cpython-310.pyc ADDED
Binary file (6.77 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/__pycache__/tokenization_bert_tf.cpython-310.pyc ADDED
Binary file (9.29 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py ADDED
@@ -0,0 +1,112 @@
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint."""

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name

    Currently supported HF models:

        - Y BertModel
        - N BertForMaskedLM
        - N BertForPreTraining
        - N BertForMultipleChoice
        - N BertForNextSentencePrediction
        - N BertForSequenceClassification
        - N BertForQuestionAnswering
    """

    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf_var.assign(tf.cast(torch_tensor, tf_var.dtype))
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. google-bert/bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
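A hedged usage sketch for the script above, driving it through `main(raw_args=...)` rather than the command line; the paths are placeholders, and the script relies on TF1-style APIs (`tf.Session`, `tf.get_variable`), so it needs an environment where those are available:

```python
# Sketch only: programmatic invocation of the converter defined above.
from transformers.models.bert.convert_bert_pytorch_checkpoint_to_original_tf import main

main(
    raw_args=[
        "--model_name", "google-bert/bert-base-uncased",
        "--pytorch_model_path", "/path/to/pytorch_model.bin",  # placeholder
        "--tf_cache_dir", "/path/to/tf_checkpoints",           # placeholder output dir
    ]
)
```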
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,187 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script converts a lm-head checkpoint from the "Token Dropping" implementation into a PyTorch-compatible BERT
model. The official implementation of "Token Dropping" can be found in the TensorFlow Models repository:

https://github.com/tensorflow/models/tree/master/official/projects/token_dropping
"""
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
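Similarly, a hedged sketch of calling the token-dropping converter directly instead of via argparse (all paths are placeholders):

```python
# Sketch only: convert a TF2 "token dropping" checkpoint into a PyTorch BertForMaskedLM directory.
from transformers.models.bert.convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch import (
    convert_checkpoint_to_pytorch,
)

convert_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/token_dropping_ckpt",  # object-based TF2 checkpoint prefix
    config_path="/path/to/bert_config.json",
    pytorch_dump_path="/path/to/output_dir",
)
```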
llmeval-env/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert.py ADDED
@@ -0,0 +1,500 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Bert."""
16
+
17
+
18
+ import collections
19
+ import os
20
+ import unicodedata
21
+ from typing import List, Optional, Tuple
22
+
23
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
30
+
31
+
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ class BertTokenizer(PreTrainedTokenizer):
53
+ r"""
54
+ Construct a BERT tokenizer. Based on WordPiece.
55
+
56
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
57
+ this superclass for more information regarding those methods.
58
+
59
+ Args:
60
+ vocab_file (`str`):
61
+ File containing the vocabulary.
62
+ do_lower_case (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to lowercase the input when tokenizing.
64
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to do basic tokenization before WordPiece.
66
+ never_split (`Iterable`, *optional*):
67
+ Collection of tokens which will never be split during tokenization. Only has an effect when
68
+ `do_basic_tokenize=True`
69
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
70
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
71
+ token instead.
72
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
77
+ The token used for padding, for example when batching sequences of different lengths.
78
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
79
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
80
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
81
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
85
+ Whether or not to tokenize Chinese characters.
86
+
87
+ This should likely be deactivated for Japanese (see this
88
+ [issue](https://github.com/huggingface/transformers/issues/328)).
89
+ strip_accents (`bool`, *optional*):
90
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
91
+ value for `lowercase` (as in the original BERT).
92
+ """
93
+
94
+ vocab_files_names = VOCAB_FILES_NAMES
95
+
96
+ def __init__(
97
+ self,
98
+ vocab_file,
99
+ do_lower_case=True,
100
+ do_basic_tokenize=True,
101
+ never_split=None,
102
+ unk_token="[UNK]",
103
+ sep_token="[SEP]",
104
+ pad_token="[PAD]",
105
+ cls_token="[CLS]",
106
+ mask_token="[MASK]",
107
+ tokenize_chinese_chars=True,
108
+ strip_accents=None,
109
+ **kwargs,
110
+ ):
111
+ if not os.path.isfile(vocab_file):
112
+ raise ValueError(
113
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
114
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
115
+ )
116
+ self.vocab = load_vocab(vocab_file)
117
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
118
+ self.do_basic_tokenize = do_basic_tokenize
119
+ if do_basic_tokenize:
120
+ self.basic_tokenizer = BasicTokenizer(
121
+ do_lower_case=do_lower_case,
122
+ never_split=never_split,
123
+ tokenize_chinese_chars=tokenize_chinese_chars,
124
+ strip_accents=strip_accents,
125
+ )
126
+
127
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
128
+
129
+ super().__init__(
130
+ do_lower_case=do_lower_case,
131
+ do_basic_tokenize=do_basic_tokenize,
132
+ never_split=never_split,
133
+ unk_token=unk_token,
134
+ sep_token=sep_token,
135
+ pad_token=pad_token,
136
+ cls_token=cls_token,
137
+ mask_token=mask_token,
138
+ tokenize_chinese_chars=tokenize_chinese_chars,
139
+ strip_accents=strip_accents,
140
+ **kwargs,
141
+ )
142
+
143
+ @property
144
+ def do_lower_case(self):
145
+ return self.basic_tokenizer.do_lower_case
146
+
147
+ @property
148
+ def vocab_size(self):
149
+ return len(self.vocab)
150
+
151
+ def get_vocab(self):
152
+ return dict(self.vocab, **self.added_tokens_encoder)
153
+
154
+ def _tokenize(self, text, split_special_tokens=False):
155
+ split_tokens = []
156
+ if self.do_basic_tokenize:
157
+ for token in self.basic_tokenizer.tokenize(
158
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
159
+ ):
160
+ # If the token is part of the never_split set
161
+ if token in self.basic_tokenizer.never_split:
162
+ split_tokens.append(token)
163
+ else:
164
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
165
+ else:
166
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
167
+ return split_tokens
168
+
169
+ def _convert_token_to_id(self, token):
170
+ """Converts a token (str) in an id using the vocab."""
171
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
172
+
173
+ def _convert_id_to_token(self, index):
174
+ """Converts an index (integer) in a token (str) using the vocab."""
175
+ return self.ids_to_tokens.get(index, self.unk_token)
176
+
177
+ def convert_tokens_to_string(self, tokens):
178
+ """Converts a sequence of tokens (string) in a single string."""
179
+ out_string = " ".join(tokens).replace(" ##", "").strip()
180
+ return out_string
181
+
182
+ def build_inputs_with_special_tokens(
183
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
184
+ ) -> List[int]:
185
+ """
186
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
187
+ adding special tokens. A BERT sequence has the following format:
188
+
189
+ - single sequence: `[CLS] X [SEP]`
190
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
191
+
192
+ Args:
193
+ token_ids_0 (`List[int]`):
194
+ List of IDs to which the special tokens will be added.
195
+ token_ids_1 (`List[int]`, *optional*):
196
+ Optional second list of IDs for sequence pairs.
197
+
198
+ Returns:
199
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
200
+ """
201
+ if token_ids_1 is None:
202
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
203
+ cls = [self.cls_token_id]
204
+ sep = [self.sep_token_id]
205
+ return cls + token_ids_0 + sep + token_ids_1 + sep
206
+
207
+ def get_special_tokens_mask(
208
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
209
+ ) -> List[int]:
210
+ """
211
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
212
+ special tokens using the tokenizer `prepare_for_model` method.
213
+
214
+ Args:
215
+ token_ids_0 (`List[int]`):
216
+ List of IDs.
217
+ token_ids_1 (`List[int]`, *optional*):
218
+ Optional second list of IDs for sequence pairs.
219
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
220
+ Whether or not the token list is already formatted with special tokens for the model.
221
+
222
+ Returns:
223
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
224
+ """
225
+
226
+ if already_has_special_tokens:
227
+ return super().get_special_tokens_mask(
228
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
229
+ )
230
+
231
+ if token_ids_1 is not None:
232
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
233
+ return [1] + ([0] * len(token_ids_0)) + [1]
234
+
235
+ def create_token_type_ids_from_sequences(
236
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
237
+ ) -> List[int]:
238
+ """
239
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
240
+ pair mask has the following format:
241
+
242
+ ```
243
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
244
+ | first sequence | second sequence |
245
+ ```
246
+
247
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
248
+
249
+ Args:
250
+ token_ids_0 (`List[int]`):
251
+ List of IDs.
252
+ token_ids_1 (`List[int]`, *optional*):
253
+ Optional second list of IDs for sequence pairs.
254
+
255
+ Returns:
256
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
257
+ """
258
+ sep = [self.sep_token_id]
259
+ cls = [self.cls_token_id]
260
+ if token_ids_1 is None:
261
+ return len(cls + token_ids_0 + sep) * [0]
262
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
263
+
264
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
265
+ index = 0
266
+ if os.path.isdir(save_directory):
267
+ vocab_file = os.path.join(
268
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
269
+ )
270
+ else:
271
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
272
+ with open(vocab_file, "w", encoding="utf-8") as writer:
273
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
274
+ if index != token_index:
275
+ logger.warning(
276
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
277
+ " Please check that the vocabulary is not corrupted!"
278
+ )
279
+ index = token_index
280
+ writer.write(token + "\n")
281
+ index += 1
282
+ return (vocab_file,)
283
+
284
+
285
+ class BasicTokenizer(object):
286
+ """
287
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
288
+
289
+ Args:
290
+ do_lower_case (`bool`, *optional*, defaults to `True`):
291
+ Whether or not to lowercase the input when tokenizing.
292
+ never_split (`Iterable`, *optional*):
293
+ Collection of tokens which will never be split during tokenization. Only has an effect when
294
+ `do_basic_tokenize=True`
295
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
296
+ Whether or not to tokenize Chinese characters.
297
+
298
+ This should likely be deactivated for Japanese (see this
299
+ [issue](https://github.com/huggingface/transformers/issues/328)).
300
+ strip_accents (`bool`, *optional*):
301
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
302
+ value for `lowercase` (as in the original BERT).
303
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
304
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
305
+ the full context of the words, such as contractions.
306
+ """
307
+
308
+ def __init__(
309
+ self,
310
+ do_lower_case=True,
311
+ never_split=None,
312
+ tokenize_chinese_chars=True,
313
+ strip_accents=None,
314
+ do_split_on_punc=True,
315
+ ):
316
+ if never_split is None:
317
+ never_split = []
318
+ self.do_lower_case = do_lower_case
319
+ self.never_split = set(never_split)
320
+ self.tokenize_chinese_chars = tokenize_chinese_chars
321
+ self.strip_accents = strip_accents
322
+ self.do_split_on_punc = do_split_on_punc
323
+
324
+ def tokenize(self, text, never_split=None):
325
+ """
326
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
327
+
328
+ Args:
329
+ never_split (`List[str]`, *optional*)
330
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
331
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
332
+ """
333
+ # union() returns a new set by concatenating the two sets.
334
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
335
+ text = self._clean_text(text)
336
+
337
+ # This was added on November 1st, 2018 for the multilingual and Chinese
338
+ # models. This is also applied to the English models now, but it doesn't
339
+ # matter since the English models were not trained on any Chinese data
340
+ # and generally don't have any Chinese data in them (there are Chinese
341
+ # characters in the vocabulary because Wikipedia does have some Chinese
342
+ # words in the English Wikipedia.).
343
+ if self.tokenize_chinese_chars:
344
+ text = self._tokenize_chinese_chars(text)
345
+ # prevents treating the same character with different unicode codepoints as different characters
346
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
347
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
348
+ split_tokens = []
349
+ for token in orig_tokens:
350
+ if token not in never_split:
351
+ if self.do_lower_case:
352
+ token = token.lower()
353
+ if self.strip_accents is not False:
354
+ token = self._run_strip_accents(token)
355
+ elif self.strip_accents:
356
+ token = self._run_strip_accents(token)
357
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
358
+
359
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
360
+ return output_tokens
361
+
362
+ def _run_strip_accents(self, text):
363
+ """Strips accents from a piece of text."""
364
+ text = unicodedata.normalize("NFD", text)
365
+ output = []
366
+ for char in text:
367
+ cat = unicodedata.category(char)
368
+ if cat == "Mn":
369
+ continue
370
+ output.append(char)
371
+ return "".join(output)
372
+
373
+ def _run_split_on_punc(self, text, never_split=None):
374
+ """Splits punctuation on a piece of text."""
375
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
376
+ return [text]
377
+ chars = list(text)
378
+ i = 0
379
+ start_new_word = True
380
+ output = []
381
+ while i < len(chars):
382
+ char = chars[i]
383
+ if _is_punctuation(char):
384
+ output.append([char])
385
+ start_new_word = True
386
+ else:
387
+ if start_new_word:
388
+ output.append([])
389
+ start_new_word = False
390
+ output[-1].append(char)
391
+ i += 1
392
+
393
+ return ["".join(x) for x in output]
394
+
395
+ def _tokenize_chinese_chars(self, text):
396
+ """Adds whitespace around any CJK character."""
397
+ output = []
398
+ for char in text:
399
+ cp = ord(char)
400
+ if self._is_chinese_char(cp):
401
+ output.append(" ")
402
+ output.append(char)
403
+ output.append(" ")
404
+ else:
405
+ output.append(char)
406
+ return "".join(output)
407
+
408
+ def _is_chinese_char(self, cp):
409
+ """Checks whether CP is the codepoint of a CJK character."""
410
+ # This defines a "chinese character" as anything in the CJK Unicode block:
411
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
412
+ #
413
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
414
+ # despite its name. The modern Korean Hangul alphabet is a different block,
415
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
416
+ # space-separated words, so they are not treated specially and handled
417
+ # like the all of the other languages.
418
+ if (
419
+ (cp >= 0x4E00 and cp <= 0x9FFF)
420
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
421
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
422
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
423
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
424
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
425
+ or (cp >= 0xF900 and cp <= 0xFAFF)
426
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
427
+ ): #
428
+ return True
429
+
430
+ return False
431
+
432
+ def _clean_text(self, text):
433
+ """Performs invalid character removal and whitespace cleanup on text."""
434
+ output = []
435
+ for char in text:
436
+ cp = ord(char)
437
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
438
+ continue
439
+ if _is_whitespace(char):
440
+ output.append(" ")
441
+ else:
442
+ output.append(char)
443
+ return "".join(output)
444
+
445
+
446
+ class WordpieceTokenizer(object):
447
+ """Runs WordPiece tokenization."""
448
+
449
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
450
+ self.vocab = vocab
451
+ self.unk_token = unk_token
452
+ self.max_input_chars_per_word = max_input_chars_per_word
453
+
454
+ def tokenize(self, text):
455
+ """
456
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
457
+ tokenization using the given vocabulary.
458
+
459
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
460
+
461
+ Args:
462
+ text: A single token or whitespace separated tokens. This should have
463
+ already been passed through *BasicTokenizer*.
464
+
465
+ Returns:
466
+ A list of wordpiece tokens.
467
+ """
468
+
469
+ output_tokens = []
470
+ for token in whitespace_tokenize(text):
471
+ chars = list(token)
472
+ if len(chars) > self.max_input_chars_per_word:
473
+ output_tokens.append(self.unk_token)
474
+ continue
475
+
476
+ is_bad = False
477
+ start = 0
478
+ sub_tokens = []
479
+ while start < len(chars):
480
+ end = len(chars)
481
+ cur_substr = None
482
+ while start < end:
483
+ substr = "".join(chars[start:end])
484
+ if start > 0:
485
+ substr = "##" + substr
486
+ if substr in self.vocab:
487
+ cur_substr = substr
488
+ break
489
+ end -= 1
490
+ if cur_substr is None:
491
+ is_bad = True
492
+ break
493
+ sub_tokens.append(cur_substr)
494
+ start = end
495
+
496
+ if is_bad:
497
+ output_tokens.append(self.unk_token)
498
+ else:
499
+ output_tokens.extend(sub_tokens)
500
+ return output_tokens
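
A minimal sketch of the greedy longest-match-first lookup implemented above, using a toy vocabulary (the vocabulary contents here are illustrative only, not from any real checkpoint):

from transformers.models.bert.tokenization_bert import WordpieceTokenizer

# Toy vocabulary; membership checks are all the tokenizer needs.
vocab = {"[UNK]", "un", "##aff", "##able"}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyzzy"))      # ['[UNK]'] -- no prefix of "xyzzy" is in the vocab
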
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__init__.py ADDED
@@ -0,0 +1,130 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
27
+ "tokenization_convbert": ["ConvBertTokenizer"],
28
+ }
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_convbert"] = [
45
+ "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "ConvBertForMaskedLM",
47
+ "ConvBertForMultipleChoice",
48
+ "ConvBertForQuestionAnswering",
49
+ "ConvBertForSequenceClassification",
50
+ "ConvBertForTokenClassification",
51
+ "ConvBertLayer",
52
+ "ConvBertModel",
53
+ "ConvBertPreTrainedModel",
54
+ "load_tf_weights_in_convbert",
55
+ ]
56
+
57
+
58
+ try:
59
+ if not is_tf_available():
60
+ raise OptionalDependencyNotAvailable()
61
+ except OptionalDependencyNotAvailable:
62
+ pass
63
+ else:
64
+ _import_structure["modeling_tf_convbert"] = [
65
+ "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
66
+ "TFConvBertForMaskedLM",
67
+ "TFConvBertForMultipleChoice",
68
+ "TFConvBertForQuestionAnswering",
69
+ "TFConvBertForSequenceClassification",
70
+ "TFConvBertForTokenClassification",
71
+ "TFConvBertLayer",
72
+ "TFConvBertModel",
73
+ "TFConvBertPreTrainedModel",
74
+ ]
75
+
76
+
77
+ if TYPE_CHECKING:
78
+ from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
79
+ from .tokenization_convbert import ConvBertTokenizer
80
+
81
+ try:
82
+ if not is_tokenizers_available():
83
+ raise OptionalDependencyNotAvailable()
84
+ except OptionalDependencyNotAvailable:
85
+ pass
86
+ else:
87
+ from .tokenization_convbert_fast import ConvBertTokenizerFast
88
+
89
+ try:
90
+ if not is_torch_available():
91
+ raise OptionalDependencyNotAvailable()
92
+ except OptionalDependencyNotAvailable:
93
+ pass
94
+ else:
95
+ from .modeling_convbert import (
96
+ CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
97
+ ConvBertForMaskedLM,
98
+ ConvBertForMultipleChoice,
99
+ ConvBertForQuestionAnswering,
100
+ ConvBertForSequenceClassification,
101
+ ConvBertForTokenClassification,
102
+ ConvBertLayer,
103
+ ConvBertModel,
104
+ ConvBertPreTrainedModel,
105
+ load_tf_weights_in_convbert,
106
+ )
107
+
108
+ try:
109
+ if not is_tf_available():
110
+ raise OptionalDependencyNotAvailable()
111
+ except OptionalDependencyNotAvailable:
112
+ pass
113
+ else:
114
+ from .modeling_tf_convbert import (
115
+ TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
116
+ TFConvBertForMaskedLM,
117
+ TFConvBertForMultipleChoice,
118
+ TFConvBertForQuestionAnswering,
119
+ TFConvBertForSequenceClassification,
120
+ TFConvBertForTokenClassification,
121
+ TFConvBertLayer,
122
+ TFConvBertModel,
123
+ TFConvBertPreTrainedModel,
124
+ )
125
+
126
+
127
+ else:
128
+ import sys
129
+
130
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
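
A rough illustration of the lazy-import pattern above: replacing the module in `sys.modules` with a `_LazyModule` means the heavy submodules are only imported the first time one of the registered names is accessed.

from transformers.models import convbert

# This attribute access is what actually imports configuration_convbert,
# via _LazyModule's __getattr__; until then only the import structure exists.
config_cls = convbert.ConvBertConfig
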
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/configuration_convbert.cpython-310.pyc ADDED
Binary file (6.09 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_convbert.cpython-310.pyc ADDED
Binary file (38.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/modeling_tf_convbert.cpython-310.pyc ADDED
Binary file (43.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert.cpython-310.pyc ADDED
Binary file (17.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc ADDED
Binary file (6.79 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py ADDED
@@ -0,0 +1,160 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ConvBERT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class ConvBertConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate a
34
+ ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
35
+ with the defaults will yield a similar configuration to that of the ConvBERT
36
+ [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
45
+ the `input_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimensionality of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
69
+ The epsilon used by the layer normalization layers.
70
+ head_ratio (`int`, *optional*, defaults to 2):
71
+ Ratio gamma to reduce the number of attention heads.
72
+ num_groups (`int`, *optional*, defaults to 1):
73
+ The number of groups for grouped linear layers in the ConvBERT model.
74
+ conv_kernel_size (`int`, *optional*, defaults to 9):
75
+ The size of the convolutional kernel.
76
+ classifier_dropout (`float`, *optional*):
77
+ The dropout ratio for the classification head.
78
+
79
+ Example:
80
+
81
+ ```python
82
+ >>> from transformers import ConvBertConfig, ConvBertModel
83
+
84
+ >>> # Initializing a ConvBERT convbert-base-uncased style configuration
85
+ >>> configuration = ConvBertConfig()
86
+
87
+ >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration
88
+ >>> model = ConvBertModel(configuration)
89
+
90
+ >>> # Accessing the model configuration
91
+ >>> configuration = model.config
92
+ ```"""
93
+
94
+ model_type = "convbert"
95
+
96
+ def __init__(
97
+ self,
98
+ vocab_size=30522,
99
+ hidden_size=768,
100
+ num_hidden_layers=12,
101
+ num_attention_heads=12,
102
+ intermediate_size=3072,
103
+ hidden_act="gelu",
104
+ hidden_dropout_prob=0.1,
105
+ attention_probs_dropout_prob=0.1,
106
+ max_position_embeddings=512,
107
+ type_vocab_size=2,
108
+ initializer_range=0.02,
109
+ layer_norm_eps=1e-12,
110
+ pad_token_id=1,
111
+ bos_token_id=0,
112
+ eos_token_id=2,
113
+ embedding_size=768,
114
+ head_ratio=2,
115
+ conv_kernel_size=9,
116
+ num_groups=1,
117
+ classifier_dropout=None,
118
+ **kwargs,
119
+ ):
120
+ super().__init__(
121
+ pad_token_id=pad_token_id,
122
+ bos_token_id=bos_token_id,
123
+ eos_token_id=eos_token_id,
124
+ **kwargs,
125
+ )
126
+
127
+ self.vocab_size = vocab_size
128
+ self.hidden_size = hidden_size
129
+ self.num_hidden_layers = num_hidden_layers
130
+ self.num_attention_heads = num_attention_heads
131
+ self.intermediate_size = intermediate_size
132
+ self.hidden_act = hidden_act
133
+ self.hidden_dropout_prob = hidden_dropout_prob
134
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
135
+ self.max_position_embeddings = max_position_embeddings
136
+ self.type_vocab_size = type_vocab_size
137
+ self.initializer_range = initializer_range
138
+ self.layer_norm_eps = layer_norm_eps
139
+ self.embedding_size = embedding_size
140
+ self.head_ratio = head_ratio
141
+ self.conv_kernel_size = conv_kernel_size
142
+ self.num_groups = num_groups
143
+ self.classifier_dropout = classifier_dropout
144
+
145
+
146
+ # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig
147
+ class ConvBertOnnxConfig(OnnxConfig):
148
+ @property
149
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
150
+ if self.task == "multiple-choice":
151
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
152
+ else:
153
+ dynamic_axis = {0: "batch", 1: "sequence"}
154
+ return OrderedDict(
155
+ [
156
+ ("input_ids", dynamic_axis),
157
+ ("attention_mask", dynamic_axis),
158
+ ("token_type_ids", dynamic_axis),
159
+ ]
160
+ )
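
A short sketch of what the `ConvBertOnnxConfig.inputs` property above yields for the default task, assuming the standard `OnnxConfig(config)` constructor:

from transformers import ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig

onnx_config = ConvBertOnnxConfig(ConvBertConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
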
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py ADDED
@@ -0,0 +1,57 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvBERT checkpoint."""
16
+
17
+ import argparse
18
+
19
+ from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
20
+ from transformers.utils import logging
21
+
22
+
23
+ logging.set_verbosity_info()
24
+
25
+
26
+ def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
27
+ conf = ConvBertConfig.from_json_file(convbert_config_file)
28
+ model = ConvBertModel(conf)
29
+
30
+ model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
31
+ model.save_pretrained(pytorch_dump_path)
32
+
33
+ tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
34
+ tf_model.save_pretrained(pytorch_dump_path)
35
+
36
+
37
+ if __name__ == "__main__":
38
+ parser = argparse.ArgumentParser()
39
+ # Required parameters
40
+ parser.add_argument(
41
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
42
+ )
43
+ parser.add_argument(
44
+ "--convbert_config_file",
45
+ default=None,
46
+ type=str,
47
+ required=True,
48
+ help=(
49
+ "The config json file corresponding to the pre-trained ConvBERT model. \n"
50
+ "This specifies the model architecture."
51
+ ),
52
+ )
53
+ parser.add_argument(
54
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
55
+ )
56
+ args = parser.parse_args()
57
+ convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
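
For reference, a hypothetical invocation of the converter above; the paths are placeholders, not real files, and running it requires TensorFlow plus an original ConvBERT TF1 checkpoint:

from transformers.models.convbert.convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2 import (
    convert_orig_tf1_checkpoint_to_pytorch,
)

# Equivalent CLI:
#   python convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py \
#       --tf_checkpoint_path /path/to/tf1_ckpt \
#       --convbert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output
convert_orig_tf1_checkpoint_to_pytorch(
    "/path/to/tf1_ckpt", "/path/to/config.json", "/path/to/output"
)
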
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py ADDED
@@ -0,0 +1,1337 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvBERT model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ from operator import attrgetter
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN, get_activation
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithCrossAttentions,
31
+ MaskedLMOutput,
32
+ MultipleChoiceModelOutput,
33
+ QuestionAnsweringModelOutput,
34
+ SequenceClassifierOutput,
35
+ TokenClassifierOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel, SequenceSummary
38
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
39
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
40
+ from .configuration_convbert import ConvBertConfig
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ _CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
46
+ _CONFIG_FOR_DOC = "ConvBertConfig"
47
+
48
+
49
+ from ..deprecated._archive_maps import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
50
+
51
+
52
+ def load_tf_weights_in_convbert(model, config, tf_checkpoint_path):
53
+ """Load tf checkpoints in a pytorch model."""
54
+ try:
55
+ import tensorflow as tf
56
+ except ImportError:
57
+ logger.error(
58
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
59
+ "https://www.tensorflow.org/install/ for installation instructions."
60
+ )
61
+ raise
62
+ tf_path = os.path.abspath(tf_checkpoint_path)
63
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
64
+ # Load weights from TF model
65
+ init_vars = tf.train.list_variables(tf_path)
66
+ tf_data = {}
67
+ for name, shape in init_vars:
68
+ logger.info(f"Loading TF weight {name} with shape {shape}")
69
+ array = tf.train.load_variable(tf_path, name)
70
+ tf_data[name] = array
71
+
72
+ param_mapping = {
73
+ "embeddings.word_embeddings.weight": "electra/embeddings/word_embeddings",
74
+ "embeddings.position_embeddings.weight": "electra/embeddings/position_embeddings",
75
+ "embeddings.token_type_embeddings.weight": "electra/embeddings/token_type_embeddings",
76
+ "embeddings.LayerNorm.weight": "electra/embeddings/LayerNorm/gamma",
77
+ "embeddings.LayerNorm.bias": "electra/embeddings/LayerNorm/beta",
78
+ "embeddings_project.weight": "electra/embeddings_project/kernel",
79
+ "embeddings_project.bias": "electra/embeddings_project/bias",
80
+ }
81
+ if config.num_groups > 1:
82
+ group_dense_name = "g_dense"
83
+ else:
84
+ group_dense_name = "dense"
85
+
86
+ for j in range(config.num_hidden_layers):
87
+ param_mapping[
88
+ f"encoder.layer.{j}.attention.self.query.weight"
89
+ ] = f"electra/encoder/layer_{j}/attention/self/query/kernel"
90
+ param_mapping[
91
+ f"encoder.layer.{j}.attention.self.query.bias"
92
+ ] = f"electra/encoder/layer_{j}/attention/self/query/bias"
93
+ param_mapping[
94
+ f"encoder.layer.{j}.attention.self.key.weight"
95
+ ] = f"electra/encoder/layer_{j}/attention/self/key/kernel"
96
+ param_mapping[
97
+ f"encoder.layer.{j}.attention.self.key.bias"
98
+ ] = f"electra/encoder/layer_{j}/attention/self/key/bias"
99
+ param_mapping[
100
+ f"encoder.layer.{j}.attention.self.value.weight"
101
+ ] = f"electra/encoder/layer_{j}/attention/self/value/kernel"
102
+ param_mapping[
103
+ f"encoder.layer.{j}.attention.self.value.bias"
104
+ ] = f"electra/encoder/layer_{j}/attention/self/value/bias"
105
+ param_mapping[
106
+ f"encoder.layer.{j}.attention.self.key_conv_attn_layer.depthwise.weight"
107
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/depthwise_kernel"
108
+ param_mapping[
109
+ f"encoder.layer.{j}.attention.self.key_conv_attn_layer.pointwise.weight"
110
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/pointwise_kernel"
111
+ param_mapping[
112
+ f"encoder.layer.{j}.attention.self.key_conv_attn_layer.bias"
113
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/bias"
114
+ param_mapping[
115
+ f"encoder.layer.{j}.attention.self.conv_kernel_layer.weight"
116
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/kernel"
117
+ param_mapping[
118
+ f"encoder.layer.{j}.attention.self.conv_kernel_layer.bias"
119
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/bias"
120
+ param_mapping[
121
+ f"encoder.layer.{j}.attention.self.conv_out_layer.weight"
122
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/kernel"
123
+ param_mapping[
124
+ f"encoder.layer.{j}.attention.self.conv_out_layer.bias"
125
+ ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/bias"
126
+ param_mapping[
127
+ f"encoder.layer.{j}.attention.output.dense.weight"
128
+ ] = f"electra/encoder/layer_{j}/attention/output/dense/kernel"
129
+ param_mapping[
130
+ f"encoder.layer.{j}.attention.output.LayerNorm.weight"
131
+ ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/gamma"
132
+ param_mapping[
133
+ f"encoder.layer.{j}.attention.output.dense.bias"
134
+ ] = f"electra/encoder/layer_{j}/attention/output/dense/bias"
135
+ param_mapping[
136
+ f"encoder.layer.{j}.attention.output.LayerNorm.bias"
137
+ ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/beta"
138
+ param_mapping[
139
+ f"encoder.layer.{j}.intermediate.dense.weight"
140
+ ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/kernel"
141
+ param_mapping[
142
+ f"encoder.layer.{j}.intermediate.dense.bias"
143
+ ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/bias"
144
+ param_mapping[
145
+ f"encoder.layer.{j}.output.dense.weight"
146
+ ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/kernel"
147
+ param_mapping[
148
+ f"encoder.layer.{j}.output.dense.bias"
149
+ ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/bias"
150
+ param_mapping[
151
+ f"encoder.layer.{j}.output.LayerNorm.weight"
152
+ ] = f"electra/encoder/layer_{j}/output/LayerNorm/gamma"
153
+ param_mapping[f"encoder.layer.{j}.output.LayerNorm.bias"] = f"electra/encoder/layer_{j}/output/LayerNorm/beta"
154
+
155
+ for param in model.named_parameters():
156
+ param_name = param[0]
157
+ retriever = attrgetter(param_name)
158
+ result = retriever(model)
159
+ tf_name = param_mapping[param_name]
160
+ value = torch.from_numpy(tf_data[tf_name])
161
+ logger.info(f"TF: {tf_name}, PT: {param_name} ")
162
+ if tf_name.endswith("/kernel"):
163
+ if not tf_name.endswith("/intermediate/g_dense/kernel"):
164
+ if not tf_name.endswith("/output/g_dense/kernel"):
165
+ value = value.T
166
+ if tf_name.endswith("/depthwise_kernel"):
167
+ value = value.permute(1, 2, 0) # 2, 0, 1
168
+ if tf_name.endswith("/pointwise_kernel"):
169
+ value = value.permute(2, 1, 0) # 2, 1, 0
170
+ if tf_name.endswith("/conv_attn_key/bias"):
171
+ value = value.unsqueeze(-1)
172
+ result.data = value
173
+ return model
174
+
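+ # Note on the conversions above: TF dense kernels are stored as (in_features, out_features)
+ # while torch.nn.Linear weights are (out_features, in_features), hence the transpose for
+ # "/kernel" variables; the grouped g_dense kernels are left untouched, and the depthwise/
+ # pointwise convolution kernels are permuted to match the PyTorch Conv1d layout.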
175
+
176
+ class ConvBertEmbeddings(nn.Module):
177
+ """Construct the embeddings from word, position and token_type embeddings."""
178
+
179
+ def __init__(self, config):
180
+ super().__init__()
181
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
182
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
183
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
184
+
185
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
186
+ # any TensorFlow checkpoint file
187
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
188
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
189
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
190
+ self.register_buffer(
191
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
192
+ )
193
+ self.register_buffer(
194
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
195
+ )
196
+
197
+ def forward(
198
+ self,
199
+ input_ids: Optional[torch.LongTensor] = None,
200
+ token_type_ids: Optional[torch.LongTensor] = None,
201
+ position_ids: Optional[torch.LongTensor] = None,
202
+ inputs_embeds: Optional[torch.FloatTensor] = None,
203
+ ) -> torch.LongTensor:
204
+ if input_ids is not None:
205
+ input_shape = input_ids.size()
206
+ else:
207
+ input_shape = inputs_embeds.size()[:-1]
208
+
209
+ seq_length = input_shape[1]
210
+
211
+ if position_ids is None:
212
+ position_ids = self.position_ids[:, :seq_length]
213
+
214
+ # Setting the token_type_ids to the registered buffer defined in the constructor, where it is all zeros, usually occurs
215
+ # when it's auto-generated. The registered buffer helps users trace the model without passing token_type_ids, and solves
216
+ # issue #5664
217
+ if token_type_ids is None:
218
+ if hasattr(self, "token_type_ids"):
219
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
220
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
221
+ token_type_ids = buffered_token_type_ids_expanded
222
+ else:
223
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
224
+
225
+ if inputs_embeds is None:
226
+ inputs_embeds = self.word_embeddings(input_ids)
227
+ position_embeddings = self.position_embeddings(position_ids)
228
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
229
+
230
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
231
+ embeddings = self.LayerNorm(embeddings)
232
+ embeddings = self.dropout(embeddings)
233
+ return embeddings
234
+
235
+
236
+ class ConvBertPreTrainedModel(PreTrainedModel):
237
+ """
238
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
239
+ models.
240
+ """
241
+
242
+ config_class = ConvBertConfig
243
+ load_tf_weights = load_tf_weights_in_convbert
244
+ base_model_prefix = "convbert"
245
+ supports_gradient_checkpointing = True
246
+
247
+ def _init_weights(self, module):
248
+ """Initialize the weights"""
249
+ if isinstance(module, nn.Linear):
250
+ # Slightly different from the TF version which uses truncated_normal for initialization
251
+ # cf https://github.com/pytorch/pytorch/pull/5617
252
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
253
+ if module.bias is not None:
254
+ module.bias.data.zero_()
255
+ elif isinstance(module, nn.Embedding):
256
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
257
+ if module.padding_idx is not None:
258
+ module.weight.data[module.padding_idx].zero_()
259
+ elif isinstance(module, nn.LayerNorm):
260
+ module.bias.data.zero_()
261
+ module.weight.data.fill_(1.0)
262
+
263
+
264
+ class SeparableConv1D(nn.Module):
265
+ """This class implements separable convolution, i.e. a depthwise and a pointwise layer"""
266
+
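+ # Note: the depthwise Conv1d uses groups=input_filters, so each channel is filtered
+ # independently with its own length-`kernel_size` kernel; the 1x1 pointwise Conv1d then
+ # mixes channels. This needs roughly input_filters * kernel_size + input_filters * output_filters
+ # weights instead of the input_filters * output_filters * kernel_size of a dense Conv1d.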
267
+ def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
268
+ super().__init__()
269
+ self.depthwise = nn.Conv1d(
270
+ input_filters,
271
+ input_filters,
272
+ kernel_size=kernel_size,
273
+ groups=input_filters,
274
+ padding=kernel_size // 2,
275
+ bias=False,
276
+ )
277
+ self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
278
+ self.bias = nn.Parameter(torch.zeros(output_filters, 1))
279
+
280
+ self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
281
+ self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
282
+
283
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
284
+ x = self.depthwise(hidden_states)
285
+ x = self.pointwise(x)
286
+ x += self.bias
287
+ return x
288
+
289
+
290
+ class ConvBertSelfAttention(nn.Module):
291
+ def __init__(self, config):
292
+ super().__init__()
293
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
294
+ raise ValueError(
295
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
296
+ f"heads ({config.num_attention_heads})"
297
+ )
298
+
299
+ new_num_attention_heads = config.num_attention_heads // config.head_ratio
300
+ if new_num_attention_heads < 1:
301
+ self.head_ratio = config.num_attention_heads
302
+ self.num_attention_heads = 1
303
+ else:
304
+ self.num_attention_heads = new_num_attention_heads
305
+ self.head_ratio = config.head_ratio
306
+
307
+ self.conv_kernel_size = config.conv_kernel_size
308
+ if config.hidden_size % self.num_attention_heads != 0:
309
+ raise ValueError("hidden_size should be divisible by num_attention_heads")
310
+
311
+ self.attention_head_size = (config.hidden_size // self.num_attention_heads) // 2
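+ # The extra // 2 halves the per-head width: half of the output is produced by ordinary
+ # self-attention and the other half by the span-based dynamic convolution branch below;
+ # the two halves are concatenated again at the end of forward(). Together with head_ratio,
+ # this is how ConvBERT reduces the cost of the attention block relative to BERT.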
312
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
313
+
314
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
315
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
316
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
317
+
318
+ self.key_conv_attn_layer = SeparableConv1D(
319
+ config, config.hidden_size, self.all_head_size, self.conv_kernel_size
320
+ )
321
+ self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
322
+ self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size)
323
+
324
+ self.unfold = nn.Unfold(
325
+ kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0]
326
+ )
327
+
328
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
329
+
330
+ def transpose_for_scores(self, x):
331
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
332
+ x = x.view(*new_x_shape)
333
+ return x.permute(0, 2, 1, 3)
334
+
335
+ def forward(
336
+ self,
337
+ hidden_states: torch.Tensor,
338
+ attention_mask: Optional[torch.FloatTensor] = None,
339
+ head_mask: Optional[torch.FloatTensor] = None,
340
+ encoder_hidden_states: Optional[torch.Tensor] = None,
341
+ output_attentions: Optional[bool] = False,
342
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
343
+ mixed_query_layer = self.query(hidden_states)
344
+ batch_size = hidden_states.size(0)
345
+ # If this is instantiated as a cross-attention module, the keys
346
+ # and values come from an encoder; the attention mask needs to be
347
+ # such that the encoder's padding tokens are not attended to.
348
+ if encoder_hidden_states is not None:
349
+ mixed_key_layer = self.key(encoder_hidden_states)
350
+ mixed_value_layer = self.value(encoder_hidden_states)
351
+ else:
352
+ mixed_key_layer = self.key(hidden_states)
353
+ mixed_value_layer = self.value(hidden_states)
354
+
355
+ mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2))
356
+ mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
357
+
358
+ query_layer = self.transpose_for_scores(mixed_query_layer)
359
+ key_layer = self.transpose_for_scores(mixed_key_layer)
360
+ value_layer = self.transpose_for_scores(mixed_value_layer)
361
+ conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
362
+
363
+ conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
364
+ conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
365
+ conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
366
+
367
+ conv_out_layer = self.conv_out_layer(hidden_states)
368
+ conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
369
+ conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
370
+ conv_out_layer = nn.functional.unfold(
371
+ conv_out_layer,
372
+ kernel_size=[self.conv_kernel_size, 1],
373
+ dilation=1,
374
+ padding=[(self.conv_kernel_size - 1) // 2, 0],
375
+ stride=1,
376
+ )
377
+ conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
378
+ batch_size, -1, self.all_head_size, self.conv_kernel_size
379
+ )
380
+ conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
381
+ conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
382
+ conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
383
+
384
+ # Take the dot product between "query" and "key" to get the raw attention scores.
385
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
386
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
387
+ if attention_mask is not None:
388
+ # Apply the attention mask (precomputed for all layers in ConvBertModel forward() function)
389
+ attention_scores = attention_scores + attention_mask
390
+
391
+ # Normalize the attention scores to probabilities.
392
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
393
+
394
+ # This is actually dropping out entire tokens to attend to, which might
395
+ # seem a bit unusual, but is taken from the original Transformer paper.
396
+ attention_probs = self.dropout(attention_probs)
397
+
398
+ # Mask heads if we want to
399
+ if head_mask is not None:
400
+ attention_probs = attention_probs * head_mask
401
+
402
+ context_layer = torch.matmul(attention_probs, value_layer)
403
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
404
+
405
+ conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
406
+ context_layer = torch.cat([context_layer, conv_out], 2)
407
+
408
+ # conv and context
409
+ new_context_layer_shape = context_layer.size()[:-2] + (
410
+ self.num_attention_heads * self.attention_head_size * 2,
411
+ )
412
+ context_layer = context_layer.view(*new_context_layer_shape)
413
+
414
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
415
+ return outputs
416
+
417
+
418
+ class ConvBertSelfOutput(nn.Module):
419
+ def __init__(self, config):
420
+ super().__init__()
421
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
422
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
423
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
424
+
425
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
426
+ hidden_states = self.dense(hidden_states)
427
+ hidden_states = self.dropout(hidden_states)
428
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
429
+ return hidden_states
430
+
431
+
432
+ class ConvBertAttention(nn.Module):
433
+ def __init__(self, config):
434
+ super().__init__()
435
+ self.self = ConvBertSelfAttention(config)
436
+ self.output = ConvBertSelfOutput(config)
437
+ self.pruned_heads = set()
438
+
439
+ def prune_heads(self, heads):
440
+ if len(heads) == 0:
441
+ return
442
+ heads, index = find_pruneable_heads_and_indices(
443
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
444
+ )
445
+
446
+ # Prune linear layers
447
+ self.self.query = prune_linear_layer(self.self.query, index)
448
+ self.self.key = prune_linear_layer(self.self.key, index)
449
+ self.self.value = prune_linear_layer(self.self.value, index)
450
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
451
+
452
+ # Update hyper params and store pruned heads
453
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
454
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
455
+ self.pruned_heads = self.pruned_heads.union(heads)
456
+
457
+ def forward(
458
+ self,
459
+ hidden_states: torch.Tensor,
460
+ attention_mask: Optional[torch.FloatTensor] = None,
461
+ head_mask: Optional[torch.FloatTensor] = None,
462
+ encoder_hidden_states: Optional[torch.Tensor] = None,
463
+ output_attentions: Optional[bool] = False,
464
+ ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
465
+ self_outputs = self.self(
466
+ hidden_states,
467
+ attention_mask,
468
+ head_mask,
469
+ encoder_hidden_states,
470
+ output_attentions,
471
+ )
472
+ attention_output = self.output(self_outputs[0], hidden_states)
473
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
474
+ return outputs
475
+
476
+
477
+ class GroupedLinearLayer(nn.Module):
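+ # A block-diagonal alternative to nn.Linear: the input is split into `num_groups` chunks,
+ # each chunk gets its own (group_in_dim x group_out_dim) weight, and the per-group outputs
+ # are concatenated, cutting the parameter count by a factor of num_groups compared to a
+ # dense Linear of the same overall size. Used by ConvBertIntermediate/ConvBertOutput when
+ # config.num_groups > 1.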
478
+ def __init__(self, input_size, output_size, num_groups):
479
+ super().__init__()
480
+ self.input_size = input_size
481
+ self.output_size = output_size
482
+ self.num_groups = num_groups
483
+ self.group_in_dim = self.input_size // self.num_groups
484
+ self.group_out_dim = self.output_size // self.num_groups
485
+ self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim))
486
+ self.bias = nn.Parameter(torch.empty(output_size))
487
+
488
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
489
+ batch_size = list(hidden_states.size())[0]
490
+ x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim])
491
+ x = x.permute(1, 0, 2)
492
+ x = torch.matmul(x, self.weight)
493
+ x = x.permute(1, 0, 2)
494
+ x = torch.reshape(x, [batch_size, -1, self.output_size])
495
+ x = x + self.bias
496
+ return x
497
+
498
+
499
+ class ConvBertIntermediate(nn.Module):
500
+ def __init__(self, config):
501
+ super().__init__()
502
+ if config.num_groups == 1:
503
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
504
+ else:
505
+ self.dense = GroupedLinearLayer(
506
+ input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups
507
+ )
508
+ if isinstance(config.hidden_act, str):
509
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
510
+ else:
511
+ self.intermediate_act_fn = config.hidden_act
512
+
513
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
514
+ hidden_states = self.dense(hidden_states)
515
+ hidden_states = self.intermediate_act_fn(hidden_states)
516
+ return hidden_states
517
+
518
+
519
+ class ConvBertOutput(nn.Module):
520
+ def __init__(self, config):
521
+ super().__init__()
522
+ if config.num_groups == 1:
523
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
524
+ else:
525
+ self.dense = GroupedLinearLayer(
526
+ input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups
527
+ )
528
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
529
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
530
+
531
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
532
+ hidden_states = self.dense(hidden_states)
533
+ hidden_states = self.dropout(hidden_states)
534
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
535
+ return hidden_states
536
+
537
+
538
+ class ConvBertLayer(nn.Module):
539
+ def __init__(self, config):
540
+ super().__init__()
541
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
542
+ self.seq_len_dim = 1
543
+ self.attention = ConvBertAttention(config)
544
+ self.is_decoder = config.is_decoder
545
+ self.add_cross_attention = config.add_cross_attention
546
+ if self.add_cross_attention:
547
+ if not self.is_decoder:
548
+ raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
549
+ self.crossattention = ConvBertAttention(config)
550
+ self.intermediate = ConvBertIntermediate(config)
551
+ self.output = ConvBertOutput(config)
552
+
553
+ def forward(
554
+ self,
555
+ hidden_states: torch.Tensor,
556
+ attention_mask: Optional[torch.FloatTensor] = None,
557
+ head_mask: Optional[torch.FloatTensor] = None,
558
+ encoder_hidden_states: Optional[torch.Tensor] = None,
559
+ encoder_attention_mask: Optional[torch.Tensor] = None,
560
+ output_attentions: Optional[bool] = False,
561
+ ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
562
+ self_attention_outputs = self.attention(
563
+ hidden_states,
564
+ attention_mask,
565
+ head_mask,
566
+ output_attentions=output_attentions,
567
+ )
568
+ attention_output = self_attention_outputs[0]
569
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
570
+
571
+ if self.is_decoder and encoder_hidden_states is not None:
572
+ if not hasattr(self, "crossattention"):
573
+ raise AttributeError(
574
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
575
+ " by setting `config.add_cross_attention=True`"
576
+ )
577
+ cross_attention_outputs = self.crossattention(
578
+ attention_output,
579
+ encoder_attention_mask,
580
+ head_mask,
581
+ encoder_hidden_states,
582
+ output_attentions,
583
+ )
584
+ attention_output = cross_attention_outputs[0]
585
+ outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
586
+
587
+ layer_output = apply_chunking_to_forward(
588
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
589
+ )
590
+ outputs = (layer_output,) + outputs
591
+ return outputs
592
+
593
+ def feed_forward_chunk(self, attention_output):
594
+ intermediate_output = self.intermediate(attention_output)
595
+ layer_output = self.output(intermediate_output, attention_output)
596
+ return layer_output
597
+
598
+
599
+ class ConvBertEncoder(nn.Module):
600
+ def __init__(self, config):
601
+ super().__init__()
602
+ self.config = config
603
+ self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)])
604
+ self.gradient_checkpointing = False
605
+
606
+ def forward(
607
+ self,
608
+ hidden_states: torch.Tensor,
609
+ attention_mask: Optional[torch.FloatTensor] = None,
610
+ head_mask: Optional[torch.FloatTensor] = None,
611
+ encoder_hidden_states: Optional[torch.Tensor] = None,
612
+ encoder_attention_mask: Optional[torch.Tensor] = None,
613
+ output_attentions: Optional[bool] = False,
614
+ output_hidden_states: Optional[bool] = False,
615
+ return_dict: Optional[bool] = True,
616
+ ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
617
+ all_hidden_states = () if output_hidden_states else None
618
+ all_self_attentions = () if output_attentions else None
619
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
620
+ for i, layer_module in enumerate(self.layer):
621
+ if output_hidden_states:
622
+ all_hidden_states = all_hidden_states + (hidden_states,)
623
+
624
+ layer_head_mask = head_mask[i] if head_mask is not None else None
625
+
626
+ if self.gradient_checkpointing and self.training:
627
+ layer_outputs = self._gradient_checkpointing_func(
628
+ layer_module.__call__,
629
+ hidden_states,
630
+ attention_mask,
631
+ layer_head_mask,
632
+ encoder_hidden_states,
633
+ encoder_attention_mask,
634
+ output_attentions,
635
+ )
636
+ else:
637
+ layer_outputs = layer_module(
638
+ hidden_states,
639
+ attention_mask,
640
+ layer_head_mask,
641
+ encoder_hidden_states,
642
+ encoder_attention_mask,
643
+ output_attentions,
644
+ )
645
+ hidden_states = layer_outputs[0]
646
+ if output_attentions:
647
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
648
+ if self.config.add_cross_attention:
649
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
650
+
651
+ if output_hidden_states:
652
+ all_hidden_states = all_hidden_states + (hidden_states,)
653
+
654
+ if not return_dict:
655
+ return tuple(
656
+ v
657
+ for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
658
+ if v is not None
659
+ )
660
+ return BaseModelOutputWithCrossAttentions(
661
+ last_hidden_state=hidden_states,
662
+ hidden_states=all_hidden_states,
663
+ attentions=all_self_attentions,
664
+ cross_attentions=all_cross_attentions,
665
+ )
666
+
667
+
668
+ class ConvBertPredictionHeadTransform(nn.Module):
669
+ def __init__(self, config):
670
+ super().__init__()
671
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
672
+ if isinstance(config.hidden_act, str):
673
+ self.transform_act_fn = ACT2FN[config.hidden_act]
674
+ else:
675
+ self.transform_act_fn = config.hidden_act
676
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
677
+
678
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
679
+ hidden_states = self.dense(hidden_states)
680
+ hidden_states = self.transform_act_fn(hidden_states)
681
+ hidden_states = self.LayerNorm(hidden_states)
682
+ return hidden_states
683
+
684
+
685
+ CONVBERT_START_DOCSTRING = r"""
686
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
687
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
688
+ behavior.
689
+
690
+ Parameters:
691
+ config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
692
+ Initializing with a config file does not load the weights associated with the model, only the
693
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
694
+ """
695
+
696
+ CONVBERT_INPUTS_DOCSTRING = r"""
697
+ Args:
698
+ input_ids (`torch.LongTensor` of shape `({0})`):
699
+ Indices of input sequence tokens in the vocabulary.
700
+
701
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
702
+ [`PreTrainedTokenizer.__call__`] for details.
703
+
704
+ [What are input IDs?](../glossary#input-ids)
705
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
706
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
707
+
708
+
709
+ - 1 for tokens that are **not masked**,
710
+ - 0 for tokens that are **masked**.
711
+
712
+ [What are attention masks?](../glossary#attention-mask)
713
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
714
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
715
+ 1]`:
716
+
717
+
718
+ - 0 corresponds to a *sentence A* token,
719
+ - 1 corresponds to a *sentence B* token.
720
+
721
+ [What are token type IDs?](../glossary#token-type-ids)
722
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
723
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
724
+ config.max_position_embeddings - 1]`.
725
+
726
+ [What are position IDs?](../glossary#position-ids)
727
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
728
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
729
+
730
+
731
+ - 1 indicates the head is **not masked**,
732
+ - 0 indicates the head is **masked**.
733
+
734
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
735
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
736
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
737
+ model's internal embedding lookup matrix.
738
+ output_attentions (`bool`, *optional*):
739
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
740
+ tensors for more detail.
741
+ output_hidden_states (`bool`, *optional*):
742
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
743
+ more detail.
744
+ return_dict (`bool`, *optional*):
745
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
746
+ """
747
+
748
+
749
+ @add_start_docstrings(
750
+ "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
751
+ CONVBERT_START_DOCSTRING,
752
+ )
753
+ class ConvBertModel(ConvBertPreTrainedModel):
754
+ def __init__(self, config):
755
+ super().__init__(config)
756
+ self.embeddings = ConvBertEmbeddings(config)
757
+
758
+ if config.embedding_size != config.hidden_size:
759
+ self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
760
+
761
+ self.encoder = ConvBertEncoder(config)
762
+ self.config = config
763
+ # Initialize weights and apply final processing
764
+ self.post_init()
765
+
766
+ def get_input_embeddings(self):
767
+ return self.embeddings.word_embeddings
768
+
769
+ def set_input_embeddings(self, value):
770
+ self.embeddings.word_embeddings = value
771
+
772
+ def _prune_heads(self, heads_to_prune):
773
+ """
774
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
775
+ class PreTrainedModel
776
+ """
777
+ for layer, heads in heads_to_prune.items():
778
+ self.encoder.layer[layer].attention.prune_heads(heads)
779
+
780
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
781
+ @add_code_sample_docstrings(
782
+ checkpoint=_CHECKPOINT_FOR_DOC,
783
+ output_type=BaseModelOutputWithCrossAttentions,
784
+ config_class=_CONFIG_FOR_DOC,
785
+ )
786
+ def forward(
787
+ self,
788
+ input_ids: Optional[torch.LongTensor] = None,
789
+ attention_mask: Optional[torch.FloatTensor] = None,
790
+ token_type_ids: Optional[torch.LongTensor] = None,
791
+ position_ids: Optional[torch.LongTensor] = None,
792
+ head_mask: Optional[torch.FloatTensor] = None,
793
+ inputs_embeds: Optional[torch.FloatTensor] = None,
794
+ output_attentions: Optional[bool] = None,
795
+ output_hidden_states: Optional[bool] = None,
796
+ return_dict: Optional[bool] = None,
797
+ ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
798
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
799
+ output_hidden_states = (
800
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
801
+ )
802
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
803
+
804
+ if input_ids is not None and inputs_embeds is not None:
805
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
806
+ elif input_ids is not None:
807
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
808
+ input_shape = input_ids.size()
809
+ elif inputs_embeds is not None:
810
+ input_shape = inputs_embeds.size()[:-1]
811
+ else:
812
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
813
+
814
+ batch_size, seq_length = input_shape
815
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
816
+
817
+ if attention_mask is None:
818
+ attention_mask = torch.ones(input_shape, device=device)
819
+ if token_type_ids is None:
820
+ if hasattr(self.embeddings, "token_type_ids"):
821
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
822
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
823
+ token_type_ids = buffered_token_type_ids_expanded
824
+ else:
825
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
826
+
827
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
828
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
829
+
830
+ hidden_states = self.embeddings(
831
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
832
+ )
833
+
834
+ if hasattr(self, "embeddings_project"):
835
+ hidden_states = self.embeddings_project(hidden_states)
836
+
837
+ hidden_states = self.encoder(
838
+ hidden_states,
839
+ attention_mask=extended_attention_mask,
840
+ head_mask=head_mask,
841
+ output_attentions=output_attentions,
842
+ output_hidden_states=output_hidden_states,
843
+ return_dict=return_dict,
844
+ )
845
+
846
+ return hidden_states
847
+
848
+
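+ # Minimal usage sketch for the model above (the checkpoint name is the one used in the
+ # docstrings of this file):
+ #
+ #   from transformers import AutoTokenizer, ConvBertModel
+ #   tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
+ #   model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
+ #   inputs = tokenizer("ConvBERT mixes attention and convolution.", return_tensors="pt")
+ #   outputs = model(**inputs)
+ #   outputs.last_hidden_state.shape  # (1, sequence_length, config.hidden_size)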
849
+ class ConvBertGeneratorPredictions(nn.Module):
850
+ """Prediction module for the generator, made up of two dense layers."""
851
+
852
+ def __init__(self, config):
853
+ super().__init__()
854
+
855
+ self.activation = get_activation("gelu")
856
+ self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
857
+ self.dense = nn.Linear(config.hidden_size, config.embedding_size)
858
+
859
+ def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor:
860
+ hidden_states = self.dense(generator_hidden_states)
861
+ hidden_states = self.activation(hidden_states)
862
+ hidden_states = self.LayerNorm(hidden_states)
863
+
864
+ return hidden_states
865
+
866
+
867
+ @add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
868
+ class ConvBertForMaskedLM(ConvBertPreTrainedModel):
869
+ _tied_weights_keys = ["generator.lm_head.weight"]
870
+
871
+ def __init__(self, config):
872
+ super().__init__(config)
873
+
874
+ self.convbert = ConvBertModel(config)
875
+ self.generator_predictions = ConvBertGeneratorPredictions(config)
876
+
877
+ self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
878
+ # Initialize weights and apply final processing
879
+ self.post_init()
880
+
881
+ def get_output_embeddings(self):
882
+ return self.generator_lm_head
883
+
884
+ def set_output_embeddings(self, word_embeddings):
885
+ self.generator_lm_head = word_embeddings
886
+
887
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
888
+ @add_code_sample_docstrings(
889
+ checkpoint=_CHECKPOINT_FOR_DOC,
890
+ output_type=MaskedLMOutput,
891
+ config_class=_CONFIG_FOR_DOC,
892
+ )
893
+ def forward(
894
+ self,
895
+ input_ids: Optional[torch.LongTensor] = None,
896
+ attention_mask: Optional[torch.FloatTensor] = None,
897
+ token_type_ids: Optional[torch.LongTensor] = None,
898
+ position_ids: Optional[torch.LongTensor] = None,
899
+ head_mask: Optional[torch.FloatTensor] = None,
900
+ inputs_embeds: Optional[torch.FloatTensor] = None,
901
+ labels: Optional[torch.LongTensor] = None,
902
+ output_attentions: Optional[bool] = None,
903
+ output_hidden_states: Optional[bool] = None,
904
+ return_dict: Optional[bool] = None,
905
+ ) -> Union[Tuple, MaskedLMOutput]:
906
+ r"""
907
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
908
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
909
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
910
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
911
+ """
912
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
913
+
914
+ generator_hidden_states = self.convbert(
915
+ input_ids,
916
+ attention_mask,
917
+ token_type_ids,
918
+ position_ids,
919
+ head_mask,
920
+ inputs_embeds,
921
+ output_attentions,
922
+ output_hidden_states,
923
+ return_dict,
924
+ )
925
+ generator_sequence_output = generator_hidden_states[0]
926
+
927
+ prediction_scores = self.generator_predictions(generator_sequence_output)
928
+ prediction_scores = self.generator_lm_head(prediction_scores)
929
+
930
+ loss = None
931
+ # Masked language modeling softmax layer
932
+ if labels is not None:
933
+ loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
934
+ loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
935
+
936
+ if not return_dict:
937
+ output = (prediction_scores,) + generator_hidden_states[1:]
938
+ return ((loss,) + output) if loss is not None else output
939
+
940
+ return MaskedLMOutput(
941
+ loss=loss,
942
+ logits=prediction_scores,
943
+ hidden_states=generator_hidden_states.hidden_states,
944
+ attentions=generator_hidden_states.attentions,
945
+ )
946
+
947
+
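A minimal usage sketch for the masked-LM head above (illustrative only, not part of the diff). It assumes the public `YituTech/conv-bert-base` checkpoint; because ConvBERT was pre-trained ELECTRA-style, the generator head may be freshly initialized for that checkpoint, so treat this as a plumbing check rather than a quality demo:

import torch
from transformers import AutoTokenizer, ConvBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")

# Score every vocabulary token at the [MASK] position.
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))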
948
+ class ConvBertClassificationHead(nn.Module):
949
+ """Head for sentence-level classification tasks."""
950
+
951
+ def __init__(self, config):
952
+ super().__init__()
953
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
954
+ classifier_dropout = (
955
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
956
+ )
957
+ self.dropout = nn.Dropout(classifier_dropout)
958
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
959
+
960
+ self.config = config
961
+
962
+ def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
963
+ x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
964
+ x = self.dropout(x)
965
+ x = self.dense(x)
966
+ x = ACT2FN[self.config.hidden_act](x)
967
+ x = self.dropout(x)
968
+ x = self.out_proj(x)
969
+ return x
970
+
971
+
972
+ @add_start_docstrings(
973
+ """
974
+ ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
975
+ pooled output) e.g. for GLUE tasks.
976
+ """,
977
+ CONVBERT_START_DOCSTRING,
978
+ )
979
+ class ConvBertForSequenceClassification(ConvBertPreTrainedModel):
980
+ def __init__(self, config):
981
+ super().__init__(config)
982
+ self.num_labels = config.num_labels
983
+ self.config = config
984
+ self.convbert = ConvBertModel(config)
985
+ self.classifier = ConvBertClassificationHead(config)
986
+
987
+ # Initialize weights and apply final processing
988
+ self.post_init()
989
+
990
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
991
+ @add_code_sample_docstrings(
992
+ checkpoint=_CHECKPOINT_FOR_DOC,
993
+ output_type=SequenceClassifierOutput,
994
+ config_class=_CONFIG_FOR_DOC,
995
+ )
996
+ def forward(
997
+ self,
998
+ input_ids: Optional[torch.LongTensor] = None,
999
+ attention_mask: Optional[torch.FloatTensor] = None,
1000
+ token_type_ids: Optional[torch.LongTensor] = None,
1001
+ position_ids: Optional[torch.LongTensor] = None,
1002
+ head_mask: Optional[torch.FloatTensor] = None,
1003
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1004
+ labels: Optional[torch.LongTensor] = None,
1005
+ output_attentions: Optional[bool] = None,
1006
+ output_hidden_states: Optional[bool] = None,
1007
+ return_dict: Optional[bool] = None,
1008
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1009
+ r"""
1010
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1011
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1012
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
1013
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1014
+ """
1015
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1016
+
1017
+ outputs = self.convbert(
1018
+ input_ids,
1019
+ attention_mask=attention_mask,
1020
+ token_type_ids=token_type_ids,
1021
+ position_ids=position_ids,
1022
+ head_mask=head_mask,
1023
+ inputs_embeds=inputs_embeds,
1024
+ output_attentions=output_attentions,
1025
+ output_hidden_states=output_hidden_states,
1026
+ return_dict=return_dict,
1027
+ )
1028
+
1029
+ sequence_output = outputs[0]
1030
+ logits = self.classifier(sequence_output)
1031
+
1032
+ loss = None
1033
+ if labels is not None:
1034
+ if self.config.problem_type is None:
1035
+ if self.num_labels == 1:
1036
+ self.config.problem_type = "regression"
1037
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1038
+ self.config.problem_type = "single_label_classification"
1039
+ else:
1040
+ self.config.problem_type = "multi_label_classification"
1041
+
1042
+ if self.config.problem_type == "regression":
1043
+ loss_fct = MSELoss()
1044
+ if self.num_labels == 1:
1045
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1046
+ else:
1047
+ loss = loss_fct(logits, labels)
1048
+ elif self.config.problem_type == "single_label_classification":
1049
+ loss_fct = CrossEntropyLoss()
1050
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1051
+ elif self.config.problem_type == "multi_label_classification":
1052
+ loss_fct = BCEWithLogitsLoss()
1053
+ loss = loss_fct(logits, labels)
1054
+
1055
+ if not return_dict:
1056
+ output = (logits,) + outputs[1:]
1057
+ return ((loss,) + output) if loss is not None else output
1058
+
1059
+ return SequenceClassifierOutput(
1060
+ loss=loss,
1061
+ logits=logits,
1062
+ hidden_states=outputs.hidden_states,
1063
+ attentions=outputs.attentions,
1064
+ )
1065
+
1066
+
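The `problem_type` branch above picks the loss purely from `num_labels` and the label dtype/shape. A hedged sketch of two of the three modes with a randomly initialized model (the label values are invented for illustration):

import torch
from transformers import ConvBertConfig, ConvBertForSequenceClassification

config = ConvBertConfig(num_labels=3)
model = ConvBertForSequenceClassification(config)           # random weights, no checkpoint needed
input_ids = torch.randint(0, config.vocab_size, (2, 8))

# Integer labels with num_labels > 1 -> "single_label_classification" (CrossEntropyLoss)
print(model(input_ids, labels=torch.tensor([0, 2])).loss)

# Float labels of shape (batch, num_labels) -> "multi_label_classification" (BCEWithLogitsLoss)
model.config.problem_type = None                            # reset so the branch is chosen again
print(model(input_ids, labels=torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])).loss)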
1067
+ @add_start_docstrings(
1068
+ """
1069
+ ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1070
+ softmax) e.g. for RocStories/SWAG tasks.
1071
+ """,
1072
+ CONVBERT_START_DOCSTRING,
1073
+ )
1074
+ class ConvBertForMultipleChoice(ConvBertPreTrainedModel):
1075
+ def __init__(self, config):
1076
+ super().__init__(config)
1077
+
1078
+ self.convbert = ConvBertModel(config)
1079
+ self.sequence_summary = SequenceSummary(config)
1080
+ self.classifier = nn.Linear(config.hidden_size, 1)
1081
+
1082
+ # Initialize weights and apply final processing
1083
+ self.post_init()
1084
+
1085
+ @add_start_docstrings_to_model_forward(
1086
+ CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1087
+ )
1088
+ @add_code_sample_docstrings(
1089
+ checkpoint=_CHECKPOINT_FOR_DOC,
1090
+ output_type=MultipleChoiceModelOutput,
1091
+ config_class=_CONFIG_FOR_DOC,
1092
+ )
1093
+ def forward(
1094
+ self,
1095
+ input_ids: Optional[torch.LongTensor] = None,
1096
+ attention_mask: Optional[torch.FloatTensor] = None,
1097
+ token_type_ids: Optional[torch.LongTensor] = None,
1098
+ position_ids: Optional[torch.LongTensor] = None,
1099
+ head_mask: Optional[torch.FloatTensor] = None,
1100
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1101
+ labels: Optional[torch.LongTensor] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ return_dict: Optional[bool] = None,
1105
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1106
+ r"""
1107
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1108
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1109
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1110
+ `input_ids` above)
1111
+ """
1112
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1113
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1114
+
1115
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1116
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1117
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1118
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1119
+ inputs_embeds = (
1120
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1121
+ if inputs_embeds is not None
1122
+ else None
1123
+ )
1124
+
1125
+ outputs = self.convbert(
1126
+ input_ids,
1127
+ attention_mask=attention_mask,
1128
+ token_type_ids=token_type_ids,
1129
+ position_ids=position_ids,
1130
+ head_mask=head_mask,
1131
+ inputs_embeds=inputs_embeds,
1132
+ output_attentions=output_attentions,
1133
+ output_hidden_states=output_hidden_states,
1134
+ return_dict=return_dict,
1135
+ )
1136
+
1137
+ sequence_output = outputs[0]
1138
+
1139
+ pooled_output = self.sequence_summary(sequence_output)
1140
+ logits = self.classifier(pooled_output)
1141
+ reshaped_logits = logits.view(-1, num_choices)
1142
+
1143
+ loss = None
1144
+ if labels is not None:
1145
+ loss_fct = CrossEntropyLoss()
1146
+ loss = loss_fct(reshaped_logits, labels)
1147
+
1148
+ if not return_dict:
1149
+ output = (reshaped_logits,) + outputs[1:]
1150
+ return ((loss,) + output) if loss is not None else output
1151
+
1152
+ return MultipleChoiceModelOutput(
1153
+ loss=loss,
1154
+ logits=reshaped_logits,
1155
+ hidden_states=outputs.hidden_states,
1156
+ attentions=outputs.attentions,
1157
+ )
1158
+
1159
+
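The multiple-choice head above expects `(batch_size, num_choices, sequence_length)` inputs, flattens them to `(batch_size * num_choices, sequence_length)` for the encoder, and reshapes the per-choice scores back. A shape sketch with random weights and made-up sizes:

import torch
from transformers import ConvBertConfig, ConvBertForMultipleChoice

model = ConvBertForMultipleChoice(ConvBertConfig())
input_ids = torch.randint(0, model.config.vocab_size, (2, 4, 16))   # 2 examples, 4 choices each
labels = torch.tensor([1, 3])                                        # index of the correct choice
out = model(input_ids=input_ids, labels=labels)
print(out.logits.shape)                                              # torch.Size([2, 4]), one score per choice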
1160
+ @add_start_docstrings(
1161
+ """
1162
+ ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1163
+ Named-Entity-Recognition (NER) tasks.
1164
+ """,
1165
+ CONVBERT_START_DOCSTRING,
1166
+ )
1167
+ class ConvBertForTokenClassification(ConvBertPreTrainedModel):
1168
+ def __init__(self, config):
1169
+ super().__init__(config)
1170
+ self.num_labels = config.num_labels
1171
+
1172
+ self.convbert = ConvBertModel(config)
1173
+ classifier_dropout = (
1174
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1175
+ )
1176
+ self.dropout = nn.Dropout(classifier_dropout)
1177
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1178
+
1179
+ # Initialize weights and apply final processing
1180
+ self.post_init()
1181
+
1182
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1183
+ @add_code_sample_docstrings(
1184
+ checkpoint=_CHECKPOINT_FOR_DOC,
1185
+ output_type=TokenClassifierOutput,
1186
+ config_class=_CONFIG_FOR_DOC,
1187
+ )
1188
+ def forward(
1189
+ self,
1190
+ input_ids: Optional[torch.LongTensor] = None,
1191
+ attention_mask: Optional[torch.FloatTensor] = None,
1192
+ token_type_ids: Optional[torch.LongTensor] = None,
1193
+ position_ids: Optional[torch.LongTensor] = None,
1194
+ head_mask: Optional[torch.FloatTensor] = None,
1195
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1196
+ labels: Optional[torch.LongTensor] = None,
1197
+ output_attentions: Optional[bool] = None,
1198
+ output_hidden_states: Optional[bool] = None,
1199
+ return_dict: Optional[bool] = None,
1200
+ ) -> Union[Tuple, TokenClassifierOutput]:
1201
+ r"""
1202
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1203
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1204
+ """
1205
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1206
+
1207
+ outputs = self.convbert(
1208
+ input_ids,
1209
+ attention_mask=attention_mask,
1210
+ token_type_ids=token_type_ids,
1211
+ position_ids=position_ids,
1212
+ head_mask=head_mask,
1213
+ inputs_embeds=inputs_embeds,
1214
+ output_attentions=output_attentions,
1215
+ output_hidden_states=output_hidden_states,
1216
+ return_dict=return_dict,
1217
+ )
1218
+
1219
+ sequence_output = outputs[0]
1220
+
1221
+ sequence_output = self.dropout(sequence_output)
1222
+ logits = self.classifier(sequence_output)
1223
+
1224
+ loss = None
1225
+ if labels is not None:
1226
+ loss_fct = CrossEntropyLoss()
1227
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1228
+
1229
+ if not return_dict:
1230
+ output = (logits,) + outputs[1:]
1231
+ return ((loss,) + output) if loss is not None else output
1232
+
1233
+ return TokenClassifierOutput(
1234
+ loss=loss,
1235
+ logits=logits,
1236
+ hidden_states=outputs.hidden_states,
1237
+ attentions=outputs.attentions,
1238
+ )
1239
+
1240
+
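For the token-classification head above, logits are produced per token, so labels line up token-by-token. A quick sketch with random weights and invented label ids:

import torch
from transformers import ConvBertConfig, ConvBertForTokenClassification

model = ConvBertForTokenClassification(ConvBertConfig(num_labels=5))
input_ids = torch.randint(0, model.config.vocab_size, (1, 12))
labels = torch.randint(0, 5, (1, 12))            # one label id per token
out = model(input_ids=input_ids, labels=labels)
print(out.logits.shape, out.loss)                # torch.Size([1, 12, 5]) and a scalar loss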
1241
+ @add_start_docstrings(
1242
+ """
1243
+ ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
1244
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1245
+ """,
1246
+ CONVBERT_START_DOCSTRING,
1247
+ )
1248
+ class ConvBertForQuestionAnswering(ConvBertPreTrainedModel):
1249
+ def __init__(self, config):
1250
+ super().__init__(config)
1251
+
1252
+ self.num_labels = config.num_labels
1253
+ self.convbert = ConvBertModel(config)
1254
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1255
+
1256
+ # Initialize weights and apply final processing
1257
+ self.post_init()
1258
+
1259
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1260
+ @add_code_sample_docstrings(
1261
+ checkpoint=_CHECKPOINT_FOR_DOC,
1262
+ output_type=QuestionAnsweringModelOutput,
1263
+ config_class=_CONFIG_FOR_DOC,
1264
+ )
1265
+ def forward(
1266
+ self,
1267
+ input_ids: Optional[torch.LongTensor] = None,
1268
+ attention_mask: Optional[torch.FloatTensor] = None,
1269
+ token_type_ids: Optional[torch.LongTensor] = None,
1270
+ position_ids: Optional[torch.LongTensor] = None,
1271
+ head_mask: Optional[torch.FloatTensor] = None,
1272
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1273
+ start_positions: Optional[torch.LongTensor] = None,
1274
+ end_positions: Optional[torch.LongTensor] = None,
1275
+ output_attentions: Optional[bool] = None,
1276
+ output_hidden_states: Optional[bool] = None,
1277
+ return_dict: Optional[bool] = None,
1278
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1279
+ r"""
1280
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1281
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1282
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1283
+ are not taken into account for computing the loss.
1284
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1285
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1286
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1287
+ are not taken into account for computing the loss.
1288
+ """
1289
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1290
+
1291
+ outputs = self.convbert(
1292
+ input_ids,
1293
+ attention_mask=attention_mask,
1294
+ token_type_ids=token_type_ids,
1295
+ position_ids=position_ids,
1296
+ head_mask=head_mask,
1297
+ inputs_embeds=inputs_embeds,
1298
+ output_attentions=output_attentions,
1299
+ output_hidden_states=output_hidden_states,
1300
+ return_dict=return_dict,
1301
+ )
1302
+
1303
+ sequence_output = outputs[0]
1304
+
1305
+ logits = self.qa_outputs(sequence_output)
1306
+ start_logits, end_logits = logits.split(1, dim=-1)
1307
+ start_logits = start_logits.squeeze(-1).contiguous()
1308
+ end_logits = end_logits.squeeze(-1).contiguous()
1309
+
1310
+ total_loss = None
1311
+ if start_positions is not None and end_positions is not None:
1312
+ # If we are on multi-GPU, the position labels may carry an extra dimension; squeeze it away
1313
+ if len(start_positions.size()) > 1:
1314
+ start_positions = start_positions.squeeze(-1)
1315
+ if len(end_positions.size()) > 1:
1316
+ end_positions = end_positions.squeeze(-1)
1317
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1318
+ ignored_index = start_logits.size(1)
1319
+ start_positions = start_positions.clamp(0, ignored_index)
1320
+ end_positions = end_positions.clamp(0, ignored_index)
1321
+
1322
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1323
+ start_loss = loss_fct(start_logits, start_positions)
1324
+ end_loss = loss_fct(end_logits, end_positions)
1325
+ total_loss = (start_loss + end_loss) / 2
1326
+
1327
+ if not return_dict:
1328
+ output = (start_logits, end_logits) + outputs[1:]
1329
+ return ((total_loss,) + output) if total_loss is not None else output
1330
+
1331
+ return QuestionAnsweringModelOutput(
1332
+ loss=total_loss,
1333
+ start_logits=start_logits,
1334
+ end_logits=end_logits,
1335
+ hidden_states=outputs.hidden_states,
1336
+ attentions=outputs.attentions,
1337
+ )
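The QA head returns independent start and end logits, so decoding a span means searching over those two distributions (argmax here for brevity). A hedged inference sketch; with the base checkpoint assumed above the `qa_outputs` layer is newly initialized, so only a SQuAD-fine-tuned ConvBERT checkpoint would give meaningful answers:

import torch
from transformers import AutoTokenizer, ConvBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")

question = "Who wrote the report?"
context = "The report was written by Alice in 2021."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
# With untrained QA weights start may land after end; this only illustrates the decoding flow.
print(tokenizer.decode(inputs.input_ids[0, start : end + 1], skip_special_tokens=True))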
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py ADDED
@@ -0,0 +1,1468 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvBERT model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutput,
28
+ TFMaskedLMOutput,
29
+ TFMultipleChoiceModelOutput,
30
+ TFQuestionAnsweringModelOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFMultipleChoiceLoss,
38
+ TFPreTrainedModel,
39
+ TFQuestionAnsweringLoss,
40
+ TFSequenceClassificationLoss,
41
+ TFSequenceSummary,
42
+ TFTokenClassificationLoss,
43
+ get_initializer,
44
+ keras,
45
+ keras_serializable,
46
+ unpack_inputs,
47
+ )
48
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
49
+ from ...utils import (
50
+ add_code_sample_docstrings,
51
+ add_start_docstrings,
52
+ add_start_docstrings_to_model_forward,
53
+ logging,
54
+ )
55
+ from .configuration_convbert import ConvBertConfig
56
+
57
+
58
+ logger = logging.get_logger(__name__)
59
+
60
+ _CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
61
+ _CONFIG_FOR_DOC = "ConvBertConfig"
62
+
63
+
64
+ from ..deprecated._archive_maps import TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
65
+
66
+
67
+ # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
68
+ class TFConvBertEmbeddings(keras.layers.Layer):
69
+ """Construct the embeddings from word, position and token_type embeddings."""
70
+
71
+ def __init__(self, config: ConvBertConfig, **kwargs):
72
+ super().__init__(**kwargs)
73
+
74
+ self.config = config
75
+ self.embedding_size = config.embedding_size
76
+ self.max_position_embeddings = config.max_position_embeddings
77
+ self.initializer_range = config.initializer_range
78
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
79
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
80
+
81
+ def build(self, input_shape=None):
82
+ with tf.name_scope("word_embeddings"):
83
+ self.weight = self.add_weight(
84
+ name="weight",
85
+ shape=[self.config.vocab_size, self.embedding_size],
86
+ initializer=get_initializer(self.initializer_range),
87
+ )
88
+
89
+ with tf.name_scope("token_type_embeddings"):
90
+ self.token_type_embeddings = self.add_weight(
91
+ name="embeddings",
92
+ shape=[self.config.type_vocab_size, self.embedding_size],
93
+ initializer=get_initializer(self.initializer_range),
94
+ )
95
+
96
+ with tf.name_scope("position_embeddings"):
97
+ self.position_embeddings = self.add_weight(
98
+ name="embeddings",
99
+ shape=[self.max_position_embeddings, self.embedding_size],
100
+ initializer=get_initializer(self.initializer_range),
101
+ )
102
+
103
+ if self.built:
104
+ return
105
+ self.built = True
106
+ if getattr(self, "LayerNorm", None) is not None:
107
+ with tf.name_scope(self.LayerNorm.name):
108
+ self.LayerNorm.build([None, None, self.config.embedding_size])
109
+
110
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
111
+ def call(
112
+ self,
113
+ input_ids: tf.Tensor = None,
114
+ position_ids: tf.Tensor = None,
115
+ token_type_ids: tf.Tensor = None,
116
+ inputs_embeds: tf.Tensor = None,
117
+ past_key_values_length=0,
118
+ training: bool = False,
119
+ ) -> tf.Tensor:
120
+ """
121
+ Applies embedding based on inputs tensor.
122
+
123
+ Returns:
124
+ final_embeddings (`tf.Tensor`): output embedding tensor.
125
+ """
126
+ if input_ids is None and inputs_embeds is None:
127
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
128
+
129
+ if input_ids is not None:
130
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
131
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
132
+
133
+ input_shape = shape_list(inputs_embeds)[:-1]
134
+
135
+ if token_type_ids is None:
136
+ token_type_ids = tf.fill(dims=input_shape, value=0)
137
+
138
+ if position_ids is None:
139
+ position_ids = tf.expand_dims(
140
+ tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
141
+ )
142
+
143
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
144
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
145
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
146
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
147
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
148
+
149
+ return final_embeddings
150
+
151
+
152
+ class TFConvBertSelfAttention(keras.layers.Layer):
153
+ def __init__(self, config, **kwargs):
154
+ super().__init__(**kwargs)
155
+
156
+ if config.hidden_size % config.num_attention_heads != 0:
157
+ raise ValueError(
158
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
159
+ f"heads ({config.num_attention_heads})"
160
+ )
161
+
162
+ new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
163
+ if new_num_attention_heads < 1:
164
+ self.head_ratio = config.num_attention_heads
165
+ num_attention_heads = 1
166
+ else:
167
+ num_attention_heads = new_num_attention_heads
168
+ self.head_ratio = config.head_ratio
169
+
170
+ self.num_attention_heads = num_attention_heads
171
+ self.conv_kernel_size = config.conv_kernel_size
172
+
173
+ if config.hidden_size % self.num_attention_heads != 0:
174
+ raise ValueError("hidden_size should be divisible by num_attention_heads")
175
+
176
+ self.attention_head_size = config.hidden_size // config.num_attention_heads
177
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
178
+ self.query = keras.layers.Dense(
179
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
180
+ )
181
+ self.key = keras.layers.Dense(
182
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
183
+ )
184
+ self.value = keras.layers.Dense(
185
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
186
+ )
187
+
188
+ self.key_conv_attn_layer = keras.layers.SeparableConv1D(
189
+ self.all_head_size,
190
+ self.conv_kernel_size,
191
+ padding="same",
192
+ activation=None,
193
+ depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
194
+ pointwise_initializer=get_initializer(config.initializer_range),
195
+ name="key_conv_attn_layer",
196
+ )
197
+
198
+ self.conv_kernel_layer = keras.layers.Dense(
199
+ self.num_attention_heads * self.conv_kernel_size,
200
+ activation=None,
201
+ name="conv_kernel_layer",
202
+ kernel_initializer=get_initializer(config.initializer_range),
203
+ )
204
+
205
+ self.conv_out_layer = keras.layers.Dense(
206
+ self.all_head_size,
207
+ activation=None,
208
+ name="conv_out_layer",
209
+ kernel_initializer=get_initializer(config.initializer_range),
210
+ )
211
+
212
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
213
+ self.config = config
214
+
215
+ def transpose_for_scores(self, x, batch_size):
216
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
217
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
218
+ return tf.transpose(x, perm=[0, 2, 1, 3])
219
+
220
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
221
+ batch_size = shape_list(hidden_states)[0]
222
+ mixed_query_layer = self.query(hidden_states)
223
+ mixed_key_layer = self.key(hidden_states)
224
+ mixed_value_layer = self.value(hidden_states)
225
+
226
+ mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
227
+
228
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
229
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
230
+ conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
231
+
232
+ conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
233
+ conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
234
+ conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1)
235
+
236
+ paddings = tf.constant(
237
+ [
238
+ [
239
+ 0,
240
+ 0,
241
+ ],
242
+ [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
243
+ [0, 0],
244
+ ]
245
+ )
246
+
247
+ conv_out_layer = self.conv_out_layer(hidden_states)
248
+ conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
249
+ conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
250
+
251
+ unfold_conv_out_layer = tf.stack(
252
+ [
253
+ tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
254
+ for i in range(self.conv_kernel_size)
255
+ ],
256
+ axis=-1,
257
+ )
258
+
259
+ conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
260
+
261
+ conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
262
+ conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
263
+
264
+ # Take the dot product between "query" and "key" to get the raw attention scores.
265
+ attention_scores = tf.matmul(
266
+ query_layer, key_layer, transpose_b=True
267
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
268
+ dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
269
+ attention_scores = attention_scores / tf.math.sqrt(dk)
270
+
271
+ if attention_mask is not None:
272
+ # Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
273
+ attention_scores = attention_scores + attention_mask
274
+
275
+ # Normalize the attention scores to probabilities.
276
+ attention_probs = stable_softmax(attention_scores, axis=-1)
277
+
278
+ # This is actually dropping out entire tokens to attend to, which might
279
+ # seem a bit unusual, but is taken from the original Transformer paper.
280
+ attention_probs = self.dropout(attention_probs, training=training)
281
+
282
+ # Mask heads if we want to
283
+ if head_mask is not None:
284
+ attention_probs = attention_probs * head_mask
285
+
286
+ value_layer = tf.reshape(
287
+ mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
288
+ )
289
+ value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
290
+
291
+ context_layer = tf.matmul(attention_probs, value_layer)
292
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
293
+
294
+ conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
295
+ context_layer = tf.concat([context_layer, conv_out], 2)
296
+ context_layer = tf.reshape(
297
+ context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
298
+ ) # (batch_size, seq_len_q, all_head_size)
299
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
300
+
301
+ return outputs
302
+
303
+ def build(self, input_shape=None):
304
+ if self.built:
305
+ return
306
+ self.built = True
307
+ if getattr(self, "query", None) is not None:
308
+ with tf.name_scope(self.query.name):
309
+ self.query.build([None, None, self.config.hidden_size])
310
+ if getattr(self, "key", None) is not None:
311
+ with tf.name_scope(self.key.name):
312
+ self.key.build([None, None, self.config.hidden_size])
313
+ if getattr(self, "value", None) is not None:
314
+ with tf.name_scope(self.value.name):
315
+ self.value.build([None, None, self.config.hidden_size])
316
+ if getattr(self, "key_conv_attn_layer", None) is not None:
317
+ with tf.name_scope(self.key_conv_attn_layer.name):
318
+ self.key_conv_attn_layer.build([None, None, self.config.hidden_size])
319
+ if getattr(self, "conv_kernel_layer", None) is not None:
320
+ with tf.name_scope(self.conv_kernel_layer.name):
321
+ self.conv_kernel_layer.build([None, None, self.all_head_size])
322
+ if getattr(self, "conv_out_layer", None) is not None:
323
+ with tf.name_scope(self.conv_out_layer.name):
324
+ self.conv_out_layer.build([None, None, self.config.hidden_size])
325
+
326
+
327
+ class TFConvBertSelfOutput(keras.layers.Layer):
328
+ def __init__(self, config, **kwargs):
329
+ super().__init__(**kwargs)
330
+
331
+ self.dense = keras.layers.Dense(
332
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
333
+ )
334
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
335
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
336
+ self.config = config
337
+
338
+ def call(self, hidden_states, input_tensor, training=False):
339
+ hidden_states = self.dense(hidden_states)
340
+ hidden_states = self.dropout(hidden_states, training=training)
341
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
342
+
343
+ return hidden_states
344
+
345
+ def build(self, input_shape=None):
346
+ if self.built:
347
+ return
348
+ self.built = True
349
+ if getattr(self, "dense", None) is not None:
350
+ with tf.name_scope(self.dense.name):
351
+ self.dense.build([None, None, self.config.hidden_size])
352
+ if getattr(self, "LayerNorm", None) is not None:
353
+ with tf.name_scope(self.LayerNorm.name):
354
+ self.LayerNorm.build([None, None, self.config.hidden_size])
355
+
356
+
357
+ class TFConvBertAttention(keras.layers.Layer):
358
+ def __init__(self, config, **kwargs):
359
+ super().__init__(**kwargs)
360
+
361
+ self.self_attention = TFConvBertSelfAttention(config, name="self")
362
+ self.dense_output = TFConvBertSelfOutput(config, name="output")
363
+
364
+ def prune_heads(self, heads):
365
+ raise NotImplementedError
366
+
367
+ def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
368
+ self_outputs = self.self_attention(
369
+ input_tensor, attention_mask, head_mask, output_attentions, training=training
370
+ )
371
+ attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
372
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
373
+
374
+ return outputs
375
+
376
+ def build(self, input_shape=None):
377
+ if self.built:
378
+ return
379
+ self.built = True
380
+ if getattr(self, "self_attention", None) is not None:
381
+ with tf.name_scope(self.self_attention.name):
382
+ self.self_attention.build(None)
383
+ if getattr(self, "dense_output", None) is not None:
384
+ with tf.name_scope(self.dense_output.name):
385
+ self.dense_output.build(None)
386
+
387
+
388
+ class GroupedLinearLayer(keras.layers.Layer):
389
+ def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
390
+ super().__init__(**kwargs)
391
+ self.input_size = input_size
392
+ self.output_size = output_size
393
+ self.num_groups = num_groups
394
+ self.kernel_initializer = kernel_initializer
395
+ self.group_in_dim = self.input_size // self.num_groups
396
+ self.group_out_dim = self.output_size // self.num_groups
397
+
398
+ def build(self, input_shape=None):
399
+ self.kernel = self.add_weight(
400
+ "kernel",
401
+ shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
402
+ initializer=self.kernel_initializer,
403
+ trainable=True,
404
+ )
405
+
406
+ self.bias = self.add_weight(
407
+ "bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
408
+ )
409
+ super().build(input_shape)
410
+
411
+ def call(self, hidden_states):
412
+ batch_size = shape_list(hidden_states)[0]
413
+ x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
414
+ x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
415
+ x = tf.transpose(x, [1, 0, 2])
416
+ x = tf.reshape(x, [batch_size, -1, self.output_size])
417
+ x = tf.nn.bias_add(value=x, bias=self.bias)
418
+ return x
419
+
420
+
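`GroupedLinearLayer` above gives each of the `num_groups` input chunks its own smaller weight matrix, cutting parameters roughly by the group count compared with a dense layer. A quick count with assumed feed-forward sizes (768 -> 3072, 2 groups):

input_size, output_size, num_groups = 768, 3072, 2

dense_params = input_size * output_size + output_size                     # 2,362,368
grouped_kernel = num_groups * (input_size // num_groups) * (output_size // num_groups)
grouped_params = grouped_kernel + output_size                             # 1,182,720, roughly half
print(dense_params, grouped_params)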
421
+ class TFConvBertIntermediate(keras.layers.Layer):
422
+ def __init__(self, config, **kwargs):
423
+ super().__init__(**kwargs)
424
+ if config.num_groups == 1:
425
+ self.dense = keras.layers.Dense(
426
+ config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
427
+ )
428
+ else:
429
+ self.dense = GroupedLinearLayer(
430
+ config.hidden_size,
431
+ config.intermediate_size,
432
+ num_groups=config.num_groups,
433
+ kernel_initializer=get_initializer(config.initializer_range),
434
+ name="dense",
435
+ )
436
+
437
+ if isinstance(config.hidden_act, str):
438
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
439
+ else:
440
+ self.intermediate_act_fn = config.hidden_act
441
+ self.config = config
442
+
443
+ def call(self, hidden_states):
444
+ hidden_states = self.dense(hidden_states)
445
+ hidden_states = self.intermediate_act_fn(hidden_states)
446
+
447
+ return hidden_states
448
+
449
+ def build(self, input_shape=None):
450
+ if self.built:
451
+ return
452
+ self.built = True
453
+ if getattr(self, "dense", None) is not None:
454
+ with tf.name_scope(self.dense.name):
455
+ self.dense.build([None, None, self.config.hidden_size])
456
+
457
+
458
+ class TFConvBertOutput(keras.layers.Layer):
459
+ def __init__(self, config, **kwargs):
460
+ super().__init__(**kwargs)
461
+
462
+ if config.num_groups == 1:
463
+ self.dense = keras.layers.Dense(
464
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
465
+ )
466
+ else:
467
+ self.dense = GroupedLinearLayer(
468
+ config.intermediate_size,
469
+ config.hidden_size,
470
+ num_groups=config.num_groups,
471
+ kernel_initializer=get_initializer(config.initializer_range),
472
+ name="dense",
473
+ )
474
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
475
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
476
+ self.config = config
477
+
478
+ def call(self, hidden_states, input_tensor, training=False):
479
+ hidden_states = self.dense(hidden_states)
480
+ hidden_states = self.dropout(hidden_states, training=training)
481
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
482
+
483
+ return hidden_states
484
+
485
+ def build(self, input_shape=None):
486
+ if self.built:
487
+ return
488
+ self.built = True
489
+ if getattr(self, "LayerNorm", None) is not None:
490
+ with tf.name_scope(self.LayerNorm.name):
491
+ self.LayerNorm.build([None, None, self.config.hidden_size])
492
+ if getattr(self, "dense", None) is not None:
493
+ with tf.name_scope(self.dense.name):
494
+ self.dense.build([None, None, self.config.intermediate_size])
495
+
496
+
497
+ class TFConvBertLayer(keras.layers.Layer):
498
+ def __init__(self, config, **kwargs):
499
+ super().__init__(**kwargs)
500
+
501
+ self.attention = TFConvBertAttention(config, name="attention")
502
+ self.intermediate = TFConvBertIntermediate(config, name="intermediate")
503
+ self.bert_output = TFConvBertOutput(config, name="output")
504
+
505
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
506
+ attention_outputs = self.attention(
507
+ hidden_states, attention_mask, head_mask, output_attentions, training=training
508
+ )
509
+ attention_output = attention_outputs[0]
510
+ intermediate_output = self.intermediate(attention_output)
511
+ layer_output = self.bert_output(intermediate_output, attention_output, training=training)
512
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
513
+
514
+ return outputs
515
+
516
+ def build(self, input_shape=None):
517
+ if self.built:
518
+ return
519
+ self.built = True
520
+ if getattr(self, "attention", None) is not None:
521
+ with tf.name_scope(self.attention.name):
522
+ self.attention.build(None)
523
+ if getattr(self, "intermediate", None) is not None:
524
+ with tf.name_scope(self.intermediate.name):
525
+ self.intermediate.build(None)
526
+ if getattr(self, "bert_output", None) is not None:
527
+ with tf.name_scope(self.bert_output.name):
528
+ self.bert_output.build(None)
529
+
530
+
531
+ class TFConvBertEncoder(keras.layers.Layer):
532
+ def __init__(self, config, **kwargs):
533
+ super().__init__(**kwargs)
534
+
535
+ self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
536
+
537
+ def call(
538
+ self,
539
+ hidden_states,
540
+ attention_mask,
541
+ head_mask,
542
+ output_attentions,
543
+ output_hidden_states,
544
+ return_dict,
545
+ training=False,
546
+ ):
547
+ all_hidden_states = () if output_hidden_states else None
548
+ all_attentions = () if output_attentions else None
549
+
550
+ for i, layer_module in enumerate(self.layer):
551
+ if output_hidden_states:
552
+ all_hidden_states = all_hidden_states + (hidden_states,)
553
+
554
+ layer_outputs = layer_module(
555
+ hidden_states, attention_mask, head_mask[i], output_attentions, training=training
556
+ )
557
+ hidden_states = layer_outputs[0]
558
+
559
+ if output_attentions:
560
+ all_attentions = all_attentions + (layer_outputs[1],)
561
+
562
+ # Add last layer
563
+ if output_hidden_states:
564
+ all_hidden_states = all_hidden_states + (hidden_states,)
565
+
566
+ if not return_dict:
567
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
568
+
569
+ return TFBaseModelOutput(
570
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
571
+ )
572
+
573
+ def build(self, input_shape=None):
574
+ if self.built:
575
+ return
576
+ self.built = True
577
+ if getattr(self, "layer", None) is not None:
578
+ for layer in self.layer:
579
+ with tf.name_scope(layer.name):
580
+ layer.build(None)
581
+
582
+
583
+ class TFConvBertPredictionHeadTransform(keras.layers.Layer):
584
+ def __init__(self, config, **kwargs):
585
+ super().__init__(**kwargs)
586
+
587
+ self.dense = keras.layers.Dense(
588
+ config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
589
+ )
590
+
591
+ if isinstance(config.hidden_act, str):
592
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
593
+ else:
594
+ self.transform_act_fn = config.hidden_act
595
+
596
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
597
+ self.config = config
598
+
599
+ def call(self, hidden_states):
600
+ hidden_states = self.dense(hidden_states)
601
+ hidden_states = self.transform_act_fn(hidden_states)
602
+ hidden_states = self.LayerNorm(hidden_states)
603
+
604
+ return hidden_states
605
+
606
+ def build(self, input_shape=None):
607
+ if self.built:
608
+ return
609
+ self.built = True
610
+ if getattr(self, "dense", None) is not None:
611
+ with tf.name_scope(self.dense.name):
612
+ self.dense.build([None, None, self.config.hidden_size])
613
+ if getattr(self, "LayerNorm", None) is not None:
614
+ with tf.name_scope(self.LayerNorm.name):
615
+ self.LayerNorm.build([None, None, self.config.hidden_size])
616
+
617
+
618
+ @keras_serializable
619
+ class TFConvBertMainLayer(keras.layers.Layer):
620
+ config_class = ConvBertConfig
621
+
622
+ def __init__(self, config, **kwargs):
623
+ super().__init__(**kwargs)
624
+
625
+ self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
626
+
627
+ if config.embedding_size != config.hidden_size:
628
+ self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project")
629
+
630
+ self.encoder = TFConvBertEncoder(config, name="encoder")
631
+ self.config = config
632
+
633
+ def get_input_embeddings(self):
634
+ return self.embeddings
635
+
636
+ def set_input_embeddings(self, value):
637
+ self.embeddings.weight = value
638
+ self.embeddings.vocab_size = value.shape[0]
639
+
640
+ def _prune_heads(self, heads_to_prune):
641
+ """
642
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
643
+ class PreTrainedModel
644
+ """
645
+ raise NotImplementedError
646
+
647
+ def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
648
+ if attention_mask is None:
649
+ attention_mask = tf.fill(input_shape, 1)
650
+
651
+ # We create a 3D attention mask from a 2D tensor mask.
652
+ # Sizes are [batch_size, 1, 1, to_seq_length]
653
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
654
+ # this attention mask is more simple than the triangular masking of causal attention
655
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
656
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
657
+
658
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
659
+ # masked positions, this operation will create a tensor which is 0.0 for
660
+ # positions we want to attend and -10000.0 for masked positions.
661
+ # Since we are adding it to the raw scores before the softmax, this is
662
+ # effectively the same as removing these entirely.
663
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype)
664
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
665
+
666
+ return extended_attention_mask
667
+
668
+ def get_head_mask(self, head_mask):
669
+ if head_mask is not None:
670
+ raise NotImplementedError
671
+ else:
672
+ head_mask = [None] * self.config.num_hidden_layers
673
+
674
+ return head_mask
675
+
676
+ @unpack_inputs
677
+ def call(
678
+ self,
679
+ input_ids=None,
680
+ attention_mask=None,
681
+ token_type_ids=None,
682
+ position_ids=None,
683
+ head_mask=None,
684
+ inputs_embeds=None,
685
+ output_attentions=None,
686
+ output_hidden_states=None,
687
+ return_dict=None,
688
+ training=False,
689
+ ):
690
+ if input_ids is not None and inputs_embeds is not None:
691
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
692
+ elif input_ids is not None:
693
+ input_shape = shape_list(input_ids)
694
+ elif inputs_embeds is not None:
695
+ input_shape = shape_list(inputs_embeds)[:-1]
696
+ else:
697
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
698
+
699
+ if attention_mask is None:
700
+ attention_mask = tf.fill(input_shape, 1)
701
+
702
+ if token_type_ids is None:
703
+ token_type_ids = tf.fill(input_shape, 0)
704
+
705
+ hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
706
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype)
707
+ head_mask = self.get_head_mask(head_mask)
708
+
709
+ if hasattr(self, "embeddings_project"):
710
+ hidden_states = self.embeddings_project(hidden_states, training=training)
711
+
712
+ hidden_states = self.encoder(
713
+ hidden_states,
714
+ extended_attention_mask,
715
+ head_mask,
716
+ output_attentions,
717
+ output_hidden_states,
718
+ return_dict,
719
+ training=training,
720
+ )
721
+
722
+ return hidden_states
723
+
724
+ def build(self, input_shape=None):
725
+ if self.built:
726
+ return
727
+ self.built = True
728
+ if getattr(self, "embeddings", None) is not None:
729
+ with tf.name_scope(self.embeddings.name):
730
+ self.embeddings.build(None)
731
+ if getattr(self, "encoder", None) is not None:
732
+ with tf.name_scope(self.encoder.name):
733
+ self.encoder.build(None)
734
+ if getattr(self, "embeddings_project", None) is not None:
735
+ with tf.name_scope(self.embeddings_project.name):
736
+ self.embeddings_project.build([None, None, self.config.embedding_size])
737
+
738
+
739
+ class TFConvBertPreTrainedModel(TFPreTrainedModel):
740
+ """
741
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
742
+ models.
743
+ """
744
+
745
+ config_class = ConvBertConfig
746
+ base_model_prefix = "convbert"
747
+
748
+
749
+ CONVBERT_START_DOCSTRING = r"""
750
+
751
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
752
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
753
+ etc.)
754
+
755
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
756
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
757
+ behavior.
758
+
759
+ <Tip>
760
+
761
+ TensorFlow models and layers in `transformers` accept two formats as input:
762
+
763
+ - having all inputs as keyword arguments (like PyTorch models), or
764
+ - having all inputs as a list, tuple or dict in the first positional argument.
765
+
766
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
767
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
768
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
769
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
770
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
771
+ positional argument:
772
+
773
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
774
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
775
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
776
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
777
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
778
+
779
+ Note that when creating models and layers with
780
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
781
+ about any of this, as you can just pass inputs like you would to any other Python function!
782
+
783
+ </Tip>
784
+
785
+ Args:
786
+ config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
787
+ Initializing with a config file does not load the weights associated with the model, only the
788
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
789
+ """
790
+
791
+ CONVBERT_INPUTS_DOCSTRING = r"""
792
+ Args:
793
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
794
+ Indices of input sequence tokens in the vocabulary.
795
+
796
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
797
+ [`PreTrainedTokenizer.encode`] for details.
798
+
799
+ [What are input IDs?](../glossary#input-ids)
800
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
801
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
802
+
803
+ - 1 for tokens that are **not masked**,
804
+ - 0 for tokens that are **masked**.
805
+
806
+ [What are attention masks?](../glossary#attention-mask)
807
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
808
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
809
+ 1]`:
810
+
811
+ - 0 corresponds to a *sentence A* token,
812
+ - 1 corresponds to a *sentence B* token.
813
+
814
+ [What are token type IDs?](../glossary#token-type-ids)
815
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
816
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
817
+ config.max_position_embeddings - 1]`.
818
+
819
+ [What are position IDs?](../glossary#position-ids)
820
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
821
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
822
+
823
+ - 1 indicates the head is **not masked**,
824
+ - 0 indicates the head is **masked**.
825
+
826
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
827
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
828
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
829
+ model's internal embedding lookup matrix.
830
+ output_attentions (`bool`, *optional*):
831
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
832
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
833
+ config will be used instead.
834
+ output_hidden_states (`bool`, *optional*):
835
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
836
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
837
+ used instead.
838
+ return_dict (`bool`, *optional*):
839
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
840
+ eager mode, in graph mode the value will always be set to True.
841
+ training (`bool`, *optional*, defaults to `False`):
842
+ Whether or not to use the model in training mode (some modules like dropout modules have different
843
+ behaviors between training and evaluation).
844
+ """
845
+
846
+
847
+ @add_start_docstrings(
848
+ "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
849
+ CONVBERT_START_DOCSTRING,
850
+ )
851
+ class TFConvBertModel(TFConvBertPreTrainedModel):
852
+ def __init__(self, config, *inputs, **kwargs):
853
+ super().__init__(config, *inputs, **kwargs)
854
+
855
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
856
+
857
+ @unpack_inputs
858
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
859
+ @add_code_sample_docstrings(
860
+ checkpoint=_CHECKPOINT_FOR_DOC,
861
+ output_type=TFBaseModelOutput,
862
+ config_class=_CONFIG_FOR_DOC,
863
+ )
864
+ def call(
865
+ self,
866
+ input_ids: TFModelInputType | None = None,
867
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
868
+ token_type_ids: Optional[Union[np.array, tf.Tensor]] = None,
869
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
870
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
871
+ inputs_embeds: tf.Tensor | None = None,
872
+ output_attentions: Optional[bool] = None,
873
+ output_hidden_states: Optional[bool] = None,
874
+ return_dict: Optional[bool] = None,
875
+ training: bool = False,
876
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
877
+ outputs = self.convbert(
878
+ input_ids=input_ids,
879
+ attention_mask=attention_mask,
880
+ token_type_ids=token_type_ids,
881
+ position_ids=position_ids,
882
+ head_mask=head_mask,
883
+ inputs_embeds=inputs_embeds,
884
+ output_attentions=output_attentions,
885
+ output_hidden_states=output_hidden_states,
886
+ return_dict=return_dict,
887
+ training=training,
888
+ )
889
+
890
+ return outputs
891
+
892
+ def build(self, input_shape=None):
893
+ if self.built:
894
+ return
895
+ self.built = True
896
+ if getattr(self, "convbert", None) is not None:
897
+ with tf.name_scope(self.convbert.name):
898
+ self.convbert.build(None)
899
+
900
+
901
+ class TFConvBertMaskedLMHead(keras.layers.Layer):
902
+ def __init__(self, config, input_embeddings, **kwargs):
903
+ super().__init__(**kwargs)
904
+
905
+ self.config = config
906
+ self.embedding_size = config.embedding_size
907
+ self.input_embeddings = input_embeddings
908
+
909
+ def build(self, input_shape):
910
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
911
+
912
+ super().build(input_shape)
913
+
914
+ def get_output_embeddings(self):
915
+ return self.input_embeddings
916
+
917
+ def set_output_embeddings(self, value):
918
+ self.input_embeddings.weight = value
919
+ self.input_embeddings.vocab_size = shape_list(value)[0]
920
+
921
+ def get_bias(self):
922
+ return {"bias": self.bias}
923
+
924
+ def set_bias(self, value):
925
+ self.bias = value["bias"]
926
+ self.config.vocab_size = shape_list(value["bias"])[0]
927
+
928
+ def call(self, hidden_states):
929
+ seq_length = shape_list(tensor=hidden_states)[1]
930
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
931
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
932
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
933
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
934
+
935
+ return hidden_states
936
+
937
+
938
+ class TFConvBertGeneratorPredictions(keras.layers.Layer):
939
+ def __init__(self, config, **kwargs):
940
+ super().__init__(**kwargs)
941
+
942
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
943
+ self.dense = keras.layers.Dense(config.embedding_size, name="dense")
944
+ self.config = config
945
+
946
+ def call(self, generator_hidden_states, training=False):
947
+ hidden_states = self.dense(generator_hidden_states)
948
+ hidden_states = get_tf_activation("gelu")(hidden_states)
949
+ hidden_states = self.LayerNorm(hidden_states)
950
+
951
+ return hidden_states
952
+
953
+ def build(self, input_shape=None):
954
+ if self.built:
955
+ return
956
+ self.built = True
957
+ if getattr(self, "LayerNorm", None) is not None:
958
+ with tf.name_scope(self.LayerNorm.name):
959
+ self.LayerNorm.build([None, None, self.config.embedding_size])
960
+ if getattr(self, "dense", None) is not None:
961
+ with tf.name_scope(self.dense.name):
962
+ self.dense.build([None, None, self.config.hidden_size])
963
+
964
+
965
+ @add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
966
+ class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
967
+ def __init__(self, config, *inputs, **kwargs):
968
+ super().__init__(config, **kwargs)
969
+
970
+ self.config = config
971
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
972
+ self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
973
+
974
+ if isinstance(config.hidden_act, str):
975
+ self.activation = get_tf_activation(config.hidden_act)
976
+ else:
977
+ self.activation = config.hidden_act
978
+
979
+ self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
980
+
981
+ def get_lm_head(self):
982
+ return self.generator_lm_head
983
+
984
+ def get_prefix_bias_name(self):
985
+ return self.name + "/" + self.generator_lm_head.name
986
+
987
+ @unpack_inputs
988
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
989
+ @add_code_sample_docstrings(
990
+ checkpoint=_CHECKPOINT_FOR_DOC,
991
+ output_type=TFMaskedLMOutput,
992
+ config_class=_CONFIG_FOR_DOC,
993
+ )
994
+ def call(
995
+ self,
996
+ input_ids: TFModelInputType | None = None,
997
+ attention_mask: np.ndarray | tf.Tensor | None = None,
998
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
999
+ position_ids: np.ndarray | tf.Tensor | None = None,
1000
+ head_mask: np.ndarray | tf.Tensor | None = None,
1001
+ inputs_embeds: tf.Tensor | None = None,
1002
+ output_attentions: Optional[bool] = None,
1003
+ output_hidden_states: Optional[bool] = None,
1004
+ return_dict: Optional[bool] = None,
1005
+ labels: tf.Tensor | None = None,
1006
+ training: Optional[bool] = False,
1007
+ ) -> Union[Tuple, TFMaskedLMOutput]:
1008
+ r"""
1009
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1010
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1011
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1012
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1013
+ """
1014
+ generator_hidden_states = self.convbert(
1015
+ input_ids=input_ids,
1016
+ attention_mask=attention_mask,
1017
+ token_type_ids=token_type_ids,
1018
+ position_ids=position_ids,
1019
+ head_mask=head_mask,
1020
+ inputs_embeds=inputs_embeds,
1021
+ output_attentions=output_attentions,
1022
+ output_hidden_states=output_hidden_states,
1023
+ return_dict=return_dict,
1024
+ training=training,
1025
+ )
1026
+ generator_sequence_output = generator_hidden_states[0]
1027
+ prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
1028
+ prediction_scores = self.generator_lm_head(prediction_scores, training=training)
1029
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
1030
+
1031
+ if not return_dict:
1032
+ output = (prediction_scores,) + generator_hidden_states[1:]
1033
+
1034
+ return ((loss,) + output) if loss is not None else output
1035
+
1036
+ return TFMaskedLMOutput(
1037
+ loss=loss,
1038
+ logits=prediction_scores,
1039
+ hidden_states=generator_hidden_states.hidden_states,
1040
+ attentions=generator_hidden_states.attentions,
1041
+ )
1042
+
1043
+ def build(self, input_shape=None):
1044
+ if self.built:
1045
+ return
1046
+ self.built = True
1047
+ if getattr(self, "convbert", None) is not None:
1048
+ with tf.name_scope(self.convbert.name):
1049
+ self.convbert.build(None)
1050
+ if getattr(self, "generator_predictions", None) is not None:
1051
+ with tf.name_scope(self.generator_predictions.name):
1052
+ self.generator_predictions.build(None)
1053
+ if getattr(self, "generator_lm_head", None) is not None:
1054
+ with tf.name_scope(self.generator_lm_head.name):
1055
+ self.generator_lm_head.build(None)
1056
+
1057
+
1058
+ class TFConvBertClassificationHead(keras.layers.Layer):
1059
+ """Head for sentence-level classification tasks."""
1060
+
1061
+ def __init__(self, config, **kwargs):
1062
+ super().__init__(**kwargs)
1063
+
1064
+ self.dense = keras.layers.Dense(
1065
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1066
+ )
1067
+ classifier_dropout = (
1068
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1069
+ )
1070
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1071
+ self.out_proj = keras.layers.Dense(
1072
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
1073
+ )
1074
+
1075
+ self.config = config
1076
+
1077
+ def call(self, hidden_states, **kwargs):
1078
+ x = hidden_states[:, 0, :] # take the first token (equivalent to [CLS])
1079
+ x = self.dropout(x)
1080
+ x = self.dense(x)
1081
+ x = get_tf_activation(self.config.hidden_act)(x)
1082
+ x = self.dropout(x)
1083
+ x = self.out_proj(x)
1084
+
1085
+ return x
1086
+
1087
+ def build(self, input_shape=None):
1088
+ if self.built:
1089
+ return
1090
+ self.built = True
1091
+ if getattr(self, "dense", None) is not None:
1092
+ with tf.name_scope(self.dense.name):
1093
+ self.dense.build([None, None, self.config.hidden_size])
1094
+ if getattr(self, "out_proj", None) is not None:
1095
+ with tf.name_scope(self.out_proj.name):
1096
+ self.out_proj.build([None, None, self.config.hidden_size])
1097
+
1098
+
1099
+ @add_start_docstrings(
1100
+ """
1101
+ ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
1102
+ """,
1103
+ CONVBERT_START_DOCSTRING,
1104
+ )
1105
+ class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
1106
+ def __init__(self, config, *inputs, **kwargs):
1107
+ super().__init__(config, *inputs, **kwargs)
1108
+ self.num_labels = config.num_labels
1109
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
1110
+ self.classifier = TFConvBertClassificationHead(config, name="classifier")
1111
+
1112
+ @unpack_inputs
1113
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1114
+ @add_code_sample_docstrings(
1115
+ checkpoint=_CHECKPOINT_FOR_DOC,
1116
+ output_type=TFSequenceClassifierOutput,
1117
+ config_class=_CONFIG_FOR_DOC,
1118
+ )
1119
+ def call(
1120
+ self,
1121
+ input_ids: TFModelInputType | None = None,
1122
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1123
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1124
+ position_ids: np.ndarray | tf.Tensor | None = None,
1125
+ head_mask: np.ndarray | tf.Tensor | None = None,
1126
+ inputs_embeds: tf.Tensor | None = None,
1127
+ output_attentions: Optional[bool] = None,
1128
+ output_hidden_states: Optional[bool] = None,
1129
+ return_dict: Optional[bool] = None,
1130
+ labels: tf.Tensor | None = None,
1131
+ training: Optional[bool] = False,
1132
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
1133
+ r"""
1134
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1135
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1136
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
1137
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1138
+ """
1139
+ outputs = self.convbert(
1140
+ input_ids,
1141
+ attention_mask=attention_mask,
1142
+ token_type_ids=token_type_ids,
1143
+ position_ids=position_ids,
1144
+ head_mask=head_mask,
1145
+ inputs_embeds=inputs_embeds,
1146
+ output_attentions=output_attentions,
1147
+ output_hidden_states=output_hidden_states,
1148
+ return_dict=return_dict,
1149
+ training=training,
1150
+ )
1151
+ logits = self.classifier(outputs[0], training=training)
1152
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1153
+
1154
+ if not return_dict:
1155
+ output = (logits,) + outputs[1:]
1156
+
1157
+ return ((loss,) + output) if loss is not None else output
1158
+
1159
+ return TFSequenceClassifierOutput(
1160
+ loss=loss,
1161
+ logits=logits,
1162
+ hidden_states=outputs.hidden_states,
1163
+ attentions=outputs.attentions,
1164
+ )
1165
+
1166
+ def build(self, input_shape=None):
1167
+ if self.built:
1168
+ return
1169
+ self.built = True
1170
+ if getattr(self, "convbert", None) is not None:
1171
+ with tf.name_scope(self.convbert.name):
1172
+ self.convbert.build(None)
1173
+ if getattr(self, "classifier", None) is not None:
1174
+ with tf.name_scope(self.classifier.name):
1175
+ self.classifier.build(None)
1176
+
1177
+
1178
+ @add_start_docstrings(
1179
+ """
1180
+ ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1181
+ softmax) e.g. for RocStories/SWAG tasks.
1182
+ """,
1183
+ CONVBERT_START_DOCSTRING,
1184
+ )
1185
+ class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
1186
+ def __init__(self, config, *inputs, **kwargs):
1187
+ super().__init__(config, *inputs, **kwargs)
1188
+
1189
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
1190
+ self.sequence_summary = TFSequenceSummary(
1191
+ config, initializer_range=config.initializer_range, name="sequence_summary"
1192
+ )
1193
+ self.classifier = keras.layers.Dense(
1194
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1195
+ )
1196
+ self.config = config
1197
+
1198
+ @unpack_inputs
1199
+ @add_start_docstrings_to_model_forward(
1200
+ CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1201
+ )
1202
+ @add_code_sample_docstrings(
1203
+ checkpoint=_CHECKPOINT_FOR_DOC,
1204
+ output_type=TFMultipleChoiceModelOutput,
1205
+ config_class=_CONFIG_FOR_DOC,
1206
+ )
1207
+ def call(
1208
+ self,
1209
+ input_ids: TFModelInputType | None = None,
1210
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1211
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1212
+ position_ids: np.ndarray | tf.Tensor | None = None,
1213
+ head_mask: np.ndarray | tf.Tensor | None = None,
1214
+ inputs_embeds: tf.Tensor | None = None,
1215
+ output_attentions: Optional[bool] = None,
1216
+ output_hidden_states: Optional[bool] = None,
1217
+ return_dict: Optional[bool] = None,
1218
+ labels: tf.Tensor | None = None,
1219
+ training: Optional[bool] = False,
1220
+ ) -> Union[Tuple, TFMultipleChoiceModelOutput]:
1221
+ r"""
1222
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1223
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
1224
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
1225
+ """
1226
+ if input_ids is not None:
1227
+ num_choices = shape_list(input_ids)[1]
1228
+ seq_length = shape_list(input_ids)[2]
1229
+ else:
1230
+ num_choices = shape_list(inputs_embeds)[1]
1231
+ seq_length = shape_list(inputs_embeds)[2]
1232
+
1233
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
1234
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
1235
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
1236
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
1237
+ flat_inputs_embeds = (
1238
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
1239
+ if inputs_embeds is not None
1240
+ else None
1241
+ )
1242
+ outputs = self.convbert(
1243
+ flat_input_ids,
1244
+ flat_attention_mask,
1245
+ flat_token_type_ids,
1246
+ flat_position_ids,
1247
+ head_mask,
1248
+ flat_inputs_embeds,
1249
+ output_attentions,
1250
+ output_hidden_states,
1251
+ return_dict=return_dict,
1252
+ training=training,
1253
+ )
1254
+ logits = self.sequence_summary(outputs[0], training=training)
1255
+ logits = self.classifier(logits)
1256
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1257
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1258
+
1259
+ if not return_dict:
1260
+ output = (reshaped_logits,) + outputs[1:]
1261
+
1262
+ return ((loss,) + output) if loss is not None else output
1263
+
1264
+ return TFMultipleChoiceModelOutput(
1265
+ loss=loss,
1266
+ logits=reshaped_logits,
1267
+ hidden_states=outputs.hidden_states,
1268
+ attentions=outputs.attentions,
1269
+ )
1270
+
1271
+ def build(self, input_shape=None):
1272
+ if self.built:
1273
+ return
1274
+ self.built = True
1275
+ if getattr(self, "convbert", None) is not None:
1276
+ with tf.name_scope(self.convbert.name):
1277
+ self.convbert.build(None)
1278
+ if getattr(self, "sequence_summary", None) is not None:
1279
+ with tf.name_scope(self.sequence_summary.name):
1280
+ self.sequence_summary.build(None)
1281
+ if getattr(self, "classifier", None) is not None:
1282
+ with tf.name_scope(self.classifier.name):
1283
+ self.classifier.build([None, None, self.config.hidden_size])
1284
+
1285
+
1286
+ @add_start_docstrings(
1287
+ """
1288
+ ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1289
+ Named-Entity-Recognition (NER) tasks.
1290
+ """,
1291
+ CONVBERT_START_DOCSTRING,
1292
+ )
1293
+ class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
1294
+ def __init__(self, config, *inputs, **kwargs):
1295
+ super().__init__(config, *inputs, **kwargs)
1296
+
1297
+ self.num_labels = config.num_labels
1298
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
1299
+ classifier_dropout = (
1300
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1301
+ )
1302
+ self.dropout = keras.layers.Dropout(classifier_dropout)
1303
+ self.classifier = keras.layers.Dense(
1304
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
1305
+ )
1306
+ self.config = config
1307
+
1308
+ @unpack_inputs
1309
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1310
+ @add_code_sample_docstrings(
1311
+ checkpoint=_CHECKPOINT_FOR_DOC,
1312
+ output_type=TFTokenClassifierOutput,
1313
+ config_class=_CONFIG_FOR_DOC,
1314
+ )
1315
+ def call(
1316
+ self,
1317
+ input_ids: TFModelInputType | None = None,
1318
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1319
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1320
+ position_ids: np.ndarray | tf.Tensor | None = None,
1321
+ head_mask: np.ndarray | tf.Tensor | None = None,
1322
+ inputs_embeds: tf.Tensor | None = None,
1323
+ output_attentions: Optional[bool] = None,
1324
+ output_hidden_states: Optional[bool] = None,
1325
+ return_dict: Optional[bool] = None,
1326
+ labels: tf.Tensor | None = None,
1327
+ training: Optional[bool] = False,
1328
+ ) -> Union[Tuple, TFTokenClassifierOutput]:
1329
+ r"""
1330
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1331
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1332
+ """
1333
+ outputs = self.convbert(
1334
+ input_ids,
1335
+ attention_mask=attention_mask,
1336
+ token_type_ids=token_type_ids,
1337
+ position_ids=position_ids,
1338
+ head_mask=head_mask,
1339
+ inputs_embeds=inputs_embeds,
1340
+ output_attentions=output_attentions,
1341
+ output_hidden_states=output_hidden_states,
1342
+ return_dict=return_dict,
1343
+ training=training,
1344
+ )
1345
+ sequence_output = outputs[0]
1346
+ sequence_output = self.dropout(sequence_output, training=training)
1347
+ logits = self.classifier(sequence_output)
1348
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1349
+
1350
+ if not return_dict:
1351
+ output = (logits,) + outputs[1:]
1352
+ return ((loss,) + output) if loss is not None else output
1353
+
1354
+ return TFTokenClassifierOutput(
1355
+ loss=loss,
1356
+ logits=logits,
1357
+ hidden_states=outputs.hidden_states,
1358
+ attentions=outputs.attentions,
1359
+ )
1360
+
1361
+ def build(self, input_shape=None):
1362
+ if self.built:
1363
+ return
1364
+ self.built = True
1365
+ if getattr(self, "convbert", None) is not None:
1366
+ with tf.name_scope(self.convbert.name):
1367
+ self.convbert.build(None)
1368
+ if getattr(self, "classifier", None) is not None:
1369
+ with tf.name_scope(self.classifier.name):
1370
+ self.classifier.build([None, None, self.config.hidden_size])
1371
+
1372
+
1373
+ @add_start_docstrings(
1374
+ """
1375
+ ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1376
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1377
+ """,
1378
+ CONVBERT_START_DOCSTRING,
1379
+ )
1380
+ class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
1381
+ def __init__(self, config, *inputs, **kwargs):
1382
+ super().__init__(config, *inputs, **kwargs)
1383
+
1384
+ self.num_labels = config.num_labels
1385
+ self.convbert = TFConvBertMainLayer(config, name="convbert")
1386
+ self.qa_outputs = keras.layers.Dense(
1387
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1388
+ )
1389
+ self.config = config
1390
+
1391
+ @unpack_inputs
1392
+ @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1393
+ @add_code_sample_docstrings(
1394
+ checkpoint=_CHECKPOINT_FOR_DOC,
1395
+ output_type=TFQuestionAnsweringModelOutput,
1396
+ config_class=_CONFIG_FOR_DOC,
1397
+ )
1398
+ def call(
1399
+ self,
1400
+ input_ids: TFModelInputType | None = None,
1401
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1402
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1403
+ position_ids: np.ndarray | tf.Tensor | None = None,
1404
+ head_mask: np.ndarray | tf.Tensor | None = None,
1405
+ inputs_embeds: tf.Tensor | None = None,
1406
+ output_attentions: Optional[bool] = None,
1407
+ output_hidden_states: Optional[bool] = None,
1408
+ return_dict: Optional[bool] = None,
1409
+ start_positions: tf.Tensor | None = None,
1410
+ end_positions: tf.Tensor | None = None,
1411
+ training: Optional[bool] = False,
1412
+ ) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
1413
+ r"""
1414
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1415
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1416
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1417
+ are not taken into account for computing the loss.
1418
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1419
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1420
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1421
+ are not taken into account for computing the loss.
1422
+ """
1423
+ outputs = self.convbert(
1424
+ input_ids,
1425
+ attention_mask=attention_mask,
1426
+ token_type_ids=token_type_ids,
1427
+ position_ids=position_ids,
1428
+ head_mask=head_mask,
1429
+ inputs_embeds=inputs_embeds,
1430
+ output_attentions=output_attentions,
1431
+ output_hidden_states=output_hidden_states,
1432
+ return_dict=return_dict,
1433
+ training=training,
1434
+ )
1435
+ sequence_output = outputs[0]
1436
+ logits = self.qa_outputs(sequence_output)
1437
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1438
+ start_logits = tf.squeeze(start_logits, axis=-1)
1439
+ end_logits = tf.squeeze(end_logits, axis=-1)
1440
+ loss = None
1441
+
1442
+ if start_positions is not None and end_positions is not None:
1443
+ labels = {"start_position": start_positions}
1444
+ labels["end_position"] = end_positions
1445
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1446
+
1447
+ if not return_dict:
1448
+ output = (start_logits, end_logits) + outputs[1:]
1449
+ return ((loss,) + output) if loss is not None else output
1450
+
1451
+ return TFQuestionAnsweringModelOutput(
1452
+ loss=loss,
1453
+ start_logits=start_logits,
1454
+ end_logits=end_logits,
1455
+ hidden_states=outputs.hidden_states,
1456
+ attentions=outputs.attentions,
1457
+ )
1458
+
1459
+ def build(self, input_shape=None):
1460
+ if self.built:
1461
+ return
1462
+ self.built = True
1463
+ if getattr(self, "convbert", None) is not None:
1464
+ with tf.name_scope(self.convbert.name):
1465
+ self.convbert.build(None)
1466
+ if getattr(self, "qa_outputs", None) is not None:
1467
+ with tf.name_scope(self.qa_outputs.name):
1468
+ self.qa_outputs.build([None, None, self.config.hidden_size])
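As a quick orientation, here is a minimal usage sketch for the TF ConvBERT classes defined above. The checkpoint name `YituTech/conv-bert-base` and the two-label head are assumptions for illustration, not part of the code above.

```python
# Hedged usage sketch: assumes TensorFlow is installed and the "YituTech/conv-bert-base"
# checkpoint is reachable; any ConvBERT checkpoint should behave the same way.
from transformers import AutoTokenizer, TFConvBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=2)

inputs = tokenizer("ConvBERT mixes self-attention with span-based dynamic convolution.", return_tensors="tf")
outputs = model(**inputs)   # TFSequenceClassifierOutput (return_dict defaults to True in eager mode)
logits = outputs.logits     # shape (batch_size, num_labels)
```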
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py ADDED
@@ -0,0 +1,503 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for ConvBERT."""
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
31
+ def load_vocab(vocab_file):
32
+ """Loads a vocabulary file into a dictionary."""
33
+ vocab = collections.OrderedDict()
34
+ with open(vocab_file, "r", encoding="utf-8") as reader:
35
+ tokens = reader.readlines()
36
+ for index, token in enumerate(tokens):
37
+ token = token.rstrip("\n")
38
+ vocab[token] = index
39
+ return vocab
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->YituTech/conv-bert-base, ConvBertTokenizer->BertTokenizer, BERT->ConvBERT
53
+ class ConvBertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a ConvBERT tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original ConvBERT).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+
97
+ def __init__(
98
+ self,
99
+ vocab_file,
100
+ do_lower_case=True,
101
+ do_basic_tokenize=True,
102
+ never_split=None,
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ tokenize_chinese_chars=True,
109
+ strip_accents=None,
110
+ **kwargs,
111
+ ):
112
+ if not os.path.isfile(vocab_file):
113
+ raise ValueError(
114
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
115
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
116
+ )
117
+ self.vocab = load_vocab(vocab_file)
118
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
119
+ self.do_basic_tokenize = do_basic_tokenize
120
+ if do_basic_tokenize:
121
+ self.basic_tokenizer = BasicTokenizer(
122
+ do_lower_case=do_lower_case,
123
+ never_split=never_split,
124
+ tokenize_chinese_chars=tokenize_chinese_chars,
125
+ strip_accents=strip_accents,
126
+ )
127
+
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ def do_lower_case(self):
146
+ return self.basic_tokenizer.do_lower_case
147
+
148
+ @property
149
+ def vocab_size(self):
150
+ return len(self.vocab)
151
+
152
+ def get_vocab(self):
153
+ return dict(self.vocab, **self.added_tokens_encoder)
154
+
155
+ def _tokenize(self, text, split_special_tokens=False):
156
+ split_tokens = []
157
+ if self.do_basic_tokenize:
158
+ for token in self.basic_tokenizer.tokenize(
159
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
160
+ ):
161
+ # If the token is part of the never_split set
162
+ if token in self.basic_tokenizer.never_split:
163
+ split_tokens.append(token)
164
+ else:
165
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
166
+ else:
167
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
168
+ return split_tokens
169
+
170
+ def _convert_token_to_id(self, token):
171
+ """Converts a token (str) in an id using the vocab."""
172
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
173
+
174
+ def _convert_id_to_token(self, index):
175
+ """Converts an index (integer) in a token (str) using the vocab."""
176
+ return self.ids_to_tokens.get(index, self.unk_token)
177
+
178
+ def convert_tokens_to_string(self, tokens):
179
+ """Converts a sequence of tokens (string) in a single string."""
180
+ out_string = " ".join(tokens).replace(" ##", "").strip()
181
+ return out_string
182
+
183
+ def build_inputs_with_special_tokens(
184
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
185
+ ) -> List[int]:
186
+ """
187
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
188
+ adding special tokens. A ConvBERT sequence has the following format:
189
+
190
+ - single sequence: `[CLS] X [SEP]`
191
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
192
+
193
+ Args:
194
+ token_ids_0 (`List[int]`):
195
+ List of IDs to which the special tokens will be added.
196
+ token_ids_1 (`List[int]`, *optional*):
197
+ Optional second list of IDs for sequence pairs.
198
+
199
+ Returns:
200
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
201
+ """
202
+ if token_ids_1 is None:
203
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
204
+ cls = [self.cls_token_id]
205
+ sep = [self.sep_token_id]
206
+ return cls + token_ids_0 + sep + token_ids_1 + sep
207
+
208
+ def get_special_tokens_mask(
209
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
210
+ ) -> List[int]:
211
+ """
212
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
213
+ special tokens using the tokenizer `prepare_for_model` method.
214
+
215
+ Args:
216
+ token_ids_0 (`List[int]`):
217
+ List of IDs.
218
+ token_ids_1 (`List[int]`, *optional*):
219
+ Optional second list of IDs for sequence pairs.
220
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the token list is already formatted with special tokens for the model.
222
+
223
+ Returns:
224
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
225
+ """
226
+
227
+ if already_has_special_tokens:
228
+ return super().get_special_tokens_mask(
229
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
230
+ )
231
+
232
+ if token_ids_1 is not None:
233
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
234
+ return [1] + ([0] * len(token_ids_0)) + [1]
235
+
236
+ def create_token_type_ids_from_sequences(
237
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
238
+ ) -> List[int]:
239
+ """
240
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence
241
+ pair mask has the following format:
242
+
243
+ ```
244
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
245
+ | first sequence | second sequence |
246
+ ```
247
+
248
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
249
+
250
+ Args:
251
+ token_ids_0 (`List[int]`):
252
+ List of IDs.
253
+ token_ids_1 (`List[int]`, *optional*):
254
+ Optional second list of IDs for sequence pairs.
255
+
256
+ Returns:
257
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
258
+ """
259
+ sep = [self.sep_token_id]
260
+ cls = [self.cls_token_id]
261
+ if token_ids_1 is None:
262
+ return len(cls + token_ids_0 + sep) * [0]
263
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
264
+
265
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
266
+ index = 0
267
+ if os.path.isdir(save_directory):
268
+ vocab_file = os.path.join(
269
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
270
+ )
271
+ else:
272
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
273
+ with open(vocab_file, "w", encoding="utf-8") as writer:
274
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
275
+ if index != token_index:
276
+ logger.warning(
277
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
278
+ " Please check that the vocabulary is not corrupted!"
279
+ )
280
+ index = token_index
281
+ writer.write(token + "\n")
282
+ index += 1
283
+ return (vocab_file,)
284
+
285
+
286
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
287
+ class BasicTokenizer(object):
288
+ """
289
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
290
+
291
+ Args:
292
+ do_lower_case (`bool`, *optional*, defaults to `True`):
293
+ Whether or not to lowercase the input when tokenizing.
294
+ never_split (`Iterable`, *optional*):
295
+ Collection of tokens which will never be split during tokenization. Only has an effect when
296
+ `do_basic_tokenize=True`
297
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
298
+ Whether or not to tokenize Chinese characters.
299
+
300
+ This should likely be deactivated for Japanese (see this
301
+ [issue](https://github.com/huggingface/transformers/issues/328)).
302
+ strip_accents (`bool`, *optional*):
303
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
304
+ value for `lowercase` (as in the original BERT).
305
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
306
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
307
+ the full context of the words, such as contractions.
308
+ """
309
+
310
+ def __init__(
311
+ self,
312
+ do_lower_case=True,
313
+ never_split=None,
314
+ tokenize_chinese_chars=True,
315
+ strip_accents=None,
316
+ do_split_on_punc=True,
317
+ ):
318
+ if never_split is None:
319
+ never_split = []
320
+ self.do_lower_case = do_lower_case
321
+ self.never_split = set(never_split)
322
+ self.tokenize_chinese_chars = tokenize_chinese_chars
323
+ self.strip_accents = strip_accents
324
+ self.do_split_on_punc = do_split_on_punc
325
+
326
+ def tokenize(self, text, never_split=None):
327
+ """
328
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
329
+
330
+ Args:
331
+ never_split (`List[str]`, *optional*):
332
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
333
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
334
+ """
335
+ # union() returns a new set by concatenating the two sets.
336
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
337
+ text = self._clean_text(text)
338
+
339
+ # This was added on November 1st, 2018 for the multilingual and Chinese
340
+ # models. This is also applied to the English models now, but it doesn't
341
+ # matter since the English models were not trained on any Chinese data
342
+ # and generally don't have any Chinese data in them (there are Chinese
343
+ # characters in the vocabulary because Wikipedia does have some Chinese
344
+ # words in the English Wikipedia).
345
+ if self.tokenize_chinese_chars:
346
+ text = self._tokenize_chinese_chars(text)
347
+ # prevents treating the same character with different unicode codepoints as different characters
348
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
349
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
350
+ split_tokens = []
351
+ for token in orig_tokens:
352
+ if token not in never_split:
353
+ if self.do_lower_case:
354
+ token = token.lower()
355
+ if self.strip_accents is not False:
356
+ token = self._run_strip_accents(token)
357
+ elif self.strip_accents:
358
+ token = self._run_strip_accents(token)
359
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
360
+
361
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
362
+ return output_tokens
363
+
364
+ def _run_strip_accents(self, text):
365
+ """Strips accents from a piece of text."""
366
+ text = unicodedata.normalize("NFD", text)
367
+ output = []
368
+ for char in text:
369
+ cat = unicodedata.category(char)
370
+ if cat == "Mn":
371
+ continue
372
+ output.append(char)
373
+ return "".join(output)
374
+
375
+ def _run_split_on_punc(self, text, never_split=None):
376
+ """Splits punctuation on a piece of text."""
377
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
378
+ return [text]
379
+ chars = list(text)
380
+ i = 0
381
+ start_new_word = True
382
+ output = []
383
+ while i < len(chars):
384
+ char = chars[i]
385
+ if _is_punctuation(char):
386
+ output.append([char])
387
+ start_new_word = True
388
+ else:
389
+ if start_new_word:
390
+ output.append([])
391
+ start_new_word = False
392
+ output[-1].append(char)
393
+ i += 1
394
+
395
+ return ["".join(x) for x in output]
396
+
397
+ def _tokenize_chinese_chars(self, text):
398
+ """Adds whitespace around any CJK character."""
399
+ output = []
400
+ for char in text:
401
+ cp = ord(char)
402
+ if self._is_chinese_char(cp):
403
+ output.append(" ")
404
+ output.append(char)
405
+ output.append(" ")
406
+ else:
407
+ output.append(char)
408
+ return "".join(output)
409
+
410
+ def _is_chinese_char(self, cp):
411
+ """Checks whether CP is the codepoint of a CJK character."""
412
+ # This defines a "chinese character" as anything in the CJK Unicode block:
413
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
414
+ #
415
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
416
+ # despite its name. The modern Korean Hangul alphabet is a different block,
417
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
418
+ # space-separated words, so they are not treated specially and handled
419
+ # like all of the other languages.
420
+ if (
421
+ (cp >= 0x4E00 and cp <= 0x9FFF)
422
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
423
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
424
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
425
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
426
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
427
+ or (cp >= 0xF900 and cp <= 0xFAFF)
428
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
429
+ ): #
430
+ return True
431
+
432
+ return False
433
+
434
+ def _clean_text(self, text):
435
+ """Performs invalid character removal and whitespace cleanup on text."""
436
+ output = []
437
+ for char in text:
438
+ cp = ord(char)
439
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
440
+ continue
441
+ if _is_whitespace(char):
442
+ output.append(" ")
443
+ else:
444
+ output.append(char)
445
+ return "".join(output)
446
+
447
+
448
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
449
+ class WordpieceTokenizer(object):
450
+ """Runs WordPiece tokenization."""
451
+
452
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
453
+ self.vocab = vocab
454
+ self.unk_token = unk_token
455
+ self.max_input_chars_per_word = max_input_chars_per_word
456
+
457
+ def tokenize(self, text):
458
+ """
459
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
460
+ tokenization using the given vocabulary.
461
+
462
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
463
+
464
+ Args:
465
+ text: A single token or whitespace separated tokens. This should have
466
+ already been passed through *BasicTokenizer*.
467
+
468
+ Returns:
469
+ A list of wordpiece tokens.
470
+ """
471
+
472
+ output_tokens = []
473
+ for token in whitespace_tokenize(text):
474
+ chars = list(token)
475
+ if len(chars) > self.max_input_chars_per_word:
476
+ output_tokens.append(self.unk_token)
477
+ continue
478
+
479
+ is_bad = False
480
+ start = 0
481
+ sub_tokens = []
482
+ while start < len(chars):
483
+ end = len(chars)
484
+ cur_substr = None
485
+ while start < end:
486
+ substr = "".join(chars[start:end])
487
+ if start > 0:
488
+ substr = "##" + substr
489
+ if substr in self.vocab:
490
+ cur_substr = substr
491
+ break
492
+ end -= 1
493
+ if cur_substr is None:
494
+ is_bad = True
495
+ break
496
+ sub_tokens.append(cur_substr)
497
+ start = end
498
+
499
+ if is_bad:
500
+ output_tokens.append(self.unk_token)
501
+ else:
502
+ output_tokens.extend(sub_tokens)
503
+ return output_tokens
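A minimal sketch of how the slow `ConvBertTokenizer` defined above assembles inputs. The checkpoint name is an assumption, and the exact word pieces depend on the vocabulary shipped with it.

```python
# Hedged sketch of the slow WordPiece tokenizer above; "YituTech/conv-bert-base" is an
# assumed checkpoint, and the produced pieces depend on its vocab.txt.
from transformers import ConvBertTokenizer

tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("unaffable")  # build_inputs_with_special_tokens wraps the pieces as [CLS] X [SEP]
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# e.g. ['[CLS]', 'una', '##ffa', '##ble', '[SEP]']  (the exact split depends on the vocabulary)
```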
llmeval-env/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py ADDED
@@ -0,0 +1,172 @@
1
+ # coding=utf-8
2
+ # Copyright The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for ConvBERT."""
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import logging
23
+ from .tokenization_convbert import ConvBertTokenizer
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->YituTech/conv-bert-base, Bert->ConvBert, BERT->ConvBERT
32
+ class ConvBertTokenizerFast(PreTrainedTokenizerFast):
33
+ r"""
34
+ Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
35
+
36
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
37
+ refer to this superclass for more information regarding those methods.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ File containing the vocabulary.
42
+ do_lower_case (`bool`, *optional*, defaults to `True`):
43
+ Whether or not to lowercase the input when tokenizing.
44
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
45
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
46
+ token instead.
47
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
48
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
49
+ sequence classification or for a text and a question for question answering. It is also used as the last
50
+ token of a sequence built with special tokens.
51
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
55
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
56
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
57
+ The token used for masking values. This is the token used when training this model with masked language
58
+ modeling. This is the token which the model will try to predict.
59
+ clean_text (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
61
+ whitespaces by the classic one.
62
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
64
+ issue](https://github.com/huggingface/transformers/issues/328)).
65
+ strip_accents (`bool`, *optional*):
66
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
67
+ value for `lowercase` (as in the original ConvBERT).
68
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
69
+ The prefix for subwords.
70
+ """
71
+
72
+ vocab_files_names = VOCAB_FILES_NAMES
73
+ slow_tokenizer_class = ConvBertTokenizer
74
+
75
+ def __init__(
76
+ self,
77
+ vocab_file=None,
78
+ tokenizer_file=None,
79
+ do_lower_case=True,
80
+ unk_token="[UNK]",
81
+ sep_token="[SEP]",
82
+ pad_token="[PAD]",
83
+ cls_token="[CLS]",
84
+ mask_token="[MASK]",
85
+ tokenize_chinese_chars=True,
86
+ strip_accents=None,
87
+ **kwargs,
88
+ ):
89
+ super().__init__(
90
+ vocab_file,
91
+ tokenizer_file=tokenizer_file,
92
+ do_lower_case=do_lower_case,
93
+ unk_token=unk_token,
94
+ sep_token=sep_token,
95
+ pad_token=pad_token,
96
+ cls_token=cls_token,
97
+ mask_token=mask_token,
98
+ tokenize_chinese_chars=tokenize_chinese_chars,
99
+ strip_accents=strip_accents,
100
+ **kwargs,
101
+ )
102
+
103
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
104
+ if (
105
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
106
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
107
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
108
+ ):
109
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
110
+ normalizer_state["lowercase"] = do_lower_case
111
+ normalizer_state["strip_accents"] = strip_accents
112
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
113
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
114
+
115
+ self.do_lower_case = do_lower_case
116
+
117
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
118
+ """
119
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
120
+ adding special tokens. A ConvBERT sequence has the following format:
121
+
122
+ - single sequence: `[CLS] X [SEP]`
123
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
124
+
125
+ Args:
126
+ token_ids_0 (`List[int]`):
127
+ List of IDs to which the special tokens will be added.
128
+ token_ids_1 (`List[int]`, *optional*):
129
+ Optional second list of IDs for sequence pairs.
130
+
131
+ Returns:
132
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
133
+ """
134
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
135
+
136
+ if token_ids_1 is not None:
137
+ output += token_ids_1 + [self.sep_token_id]
138
+
139
+ return output
140
+
141
+ def create_token_type_ids_from_sequences(
142
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
143
+ ) -> List[int]:
144
+ """
145
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence
146
+ pair mask has the following format:
147
+
148
+ ```
149
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
150
+ | first sequence | second sequence |
151
+ ```
152
+
153
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
154
+
155
+ Args:
156
+ token_ids_0 (`List[int]`):
157
+ List of IDs.
158
+ token_ids_1 (`List[int]`, *optional*):
159
+ Optional second list of IDs for sequence pairs.
160
+
161
+ Returns:
162
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
163
+ """
164
+ sep = [self.sep_token_id]
165
+ cls = [self.cls_token_id]
166
+ if token_ids_1 is None:
167
+ return len(cls + token_ids_0 + sep) * [0]
168
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
169
+
170
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
171
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
172
+ return tuple(files)
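A short sketch of the sequence-pair layout produced by `ConvBertTokenizerFast` above; the checkpoint name is again an assumption.

```python
# Hedged sketch: encodes a pair and shows the documented token_type_ids pattern
# (0s for "[CLS] A [SEP]", 1s for "B [SEP]").
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
pair = tokenizer("first sequence", "second sequence")
print(pair["token_type_ids"])
```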
llmeval-env/lib/python3.10/site-packages/transformers/models/mask2former/__pycache__/image_processing_mask2former.cpython-310.pyc ADDED
Binary file (39.8 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/__init__.py ADDED
@@ -0,0 +1,44 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+
23
+ try:
24
+ if not is_sentencepiece_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
30
+
31
+ if TYPE_CHECKING:
32
+ try:
33
+ if not is_sentencepiece_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ from .tokenization_mluke import MLukeTokenizer
39
+
40
+
41
+ else:
42
+ import sys
43
+
44
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
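The lazy-module pattern above only exposes `MLukeTokenizer` when sentencepiece is installed; a minimal sketch of the resulting import path (the checkpoint name is an assumption):

```python
# Hedged sketch: requires the sentencepiece extra; without it the lazy module above
# leaves the import structure empty and this import raises an error.
from transformers import MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")  # assumed checkpoint name
```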
llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/convert_mluke_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,229 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert mLUKE checkpoint."""
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+ from collections import OrderedDict
21
+
22
+ import torch
23
+
24
+ from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
25
+ from transformers.tokenization_utils_base import AddedToken
26
+
27
+
28
+ @torch.no_grad()
29
+ def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
30
+ # Load configuration defined in the metadata file
31
+ with open(metadata_path) as metadata_file:
32
+ metadata = json.load(metadata_file)
33
+ config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
34
+
35
+ # Load in the weights from the checkpoint_path
36
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
37
+
38
+ # Load the entity vocab file
39
+ entity_vocab = load_original_entity_vocab(entity_vocab_path)
40
+ # add an entry for [MASK2]
41
+ entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
42
+ config.entity_vocab_size += 1
43
+
44
+ tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
45
+
46
+ # Add special tokens to the token vocabulary for downstream tasks
47
+ entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
48
+ entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
49
+ tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
50
+ config.vocab_size += 2
51
+
52
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
53
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
54
+ with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
55
+ tokenizer_config = json.load(f)
56
+ tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
57
+ with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
58
+ json.dump(tokenizer_config, f)
59
+
60
+ with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
61
+ json.dump(entity_vocab, f)
62
+
63
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
64
+
65
+ # Initialize the embeddings of the special tokens
66
+ ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
67
+ ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
68
+
69
+ word_emb = state_dict["embeddings.word_embeddings.weight"]
70
+ ent_emb = word_emb[ent_init_index].unsqueeze(0)
71
+ ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
72
+ state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
73
+ # add special tokens for 'entity_predictions.bias'
74
+ for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
75
+ decoder_bias = state_dict[bias_name]
76
+ ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
77
+ ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
78
+ state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
79
+
80
+ # Initialize the query layers of the entity-aware self-attention mechanism
81
+ for layer_index in range(config.num_hidden_layers):
82
+ for matrix_name in ["query.weight", "query.bias"]:
83
+ prefix = f"encoder.layer.{layer_index}.attention.self."
84
+ state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
85
+ state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
86
+ state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
87
+
88
+ # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
89
+ entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
90
+ entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
91
+ state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
92
+ # add [MASK2] for 'entity_predictions.bias'
93
+ entity_prediction_bias = state_dict["entity_predictions.bias"]
94
+ entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
95
+ state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
96
+
97
+ model = LukeForMaskedLM(config=config).eval()
98
+
99
+ state_dict.pop("entity_predictions.decoder.weight")
100
+ state_dict.pop("lm_head.decoder.weight")
101
+ state_dict.pop("lm_head.decoder.bias")
102
+ state_dict_for_hugging_face = OrderedDict()
103
+ for key, value in state_dict.items():
104
+ if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
105
+ state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
106
+ else:
107
+ state_dict_for_hugging_face[key] = state_dict[key]
108
+
109
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
110
+
111
+ if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
112
+ raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
113
+ if set(missing_keys) != {
114
+ "lm_head.decoder.weight",
115
+ "lm_head.decoder.bias",
116
+ "entity_predictions.decoder.weight",
117
+ }:
118
+ raise ValueError(f"Unexpected missing_keys: {missing_keys}")
119
+
120
+ model.tie_weights()
121
+ assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
122
+ assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
123
+
124
+ # Check outputs
125
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
126
+
127
+ text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
128
+ span = (0, 9)
129
+ encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
130
+
131
+ outputs = model(**encoding)
132
+
133
+ # Verify word hidden states
134
+ if model_size == "large":
135
+ raise NotImplementedError
136
+ else: # base
137
+ expected_shape = torch.Size((1, 33, 768))
138
+ expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
139
+
140
+ if not (outputs.last_hidden_state.shape == expected_shape):
141
+ raise ValueError(
142
+ f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
143
+ )
144
+ if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
145
+ raise ValueError
146
+
147
+ # Verify entity hidden states
148
+ if model_size == "large":
149
+ raise NotImplementedError
150
+ else: # base
151
+ expected_shape = torch.Size((1, 1, 768))
152
+ expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
153
+
154
+ if not (outputs.entity_last_hidden_state.shape == expected_shape):
155
+ raise ValueError(
156
+ f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
157
+ f" {expected_shape}"
158
+ )
159
+ if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
160
+ raise ValueError
161
+
162
+ # Verify masked word/entity prediction
163
+ tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
164
+ text = "Tokyo is the capital of <mask>."
165
+ span = (24, 30)
166
+ encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
167
+
168
+ outputs = model(**encoding)
169
+
170
+ input_ids = encoding["input_ids"][0].tolist()
171
+ mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
172
+ predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
173
+ assert "Japan" == tokenizer.decode(predicted_id)
174
+
175
+ predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
176
+ multilingual_predicted_entities = [
177
+ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
178
+ ]
179
+ assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
180
+
181
+ # Finally, save our PyTorch model and tokenizer
182
+ print(f"Saving PyTorch model to {pytorch_dump_folder_path}")
183
+ model.save_pretrained(pytorch_dump_folder_path)
184
+
185
+
186
+ def load_original_entity_vocab(entity_vocab_path):
187
+ SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
188
+
189
+ data = [json.loads(line) for line in open(entity_vocab_path)]
190
+
191
+ new_mapping = {}
192
+ for entry in data:
193
+ entity_id = entry["id"]
194
+ for entity_name, language in entry["entities"]:
195
+ if entity_name in SPECIAL_TOKENS:
196
+ new_mapping[entity_name] = entity_id
197
+ break
198
+ new_entity_name = f"{language}:{entity_name}"
199
+ new_mapping[new_entity_name] = entity_id
200
+ return new_mapping
201
+
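+ # Each line of the original entity vocab file is expected to be a JSON object roughly of the form
+ # {"id": 5, "entities": [["Japan", "en"], ["日本", "ja"]]} (format inferred from the parsing above);
+ # non-special entries are re-keyed as "en:Japan", "ja:日本", etc., while [MASK]/[PAD]/[UNK] keep their names.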
202
+
203
+ if __name__ == "__main__":
204
+ parser = argparse.ArgumentParser()
205
+ # Required parameters
206
+ parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
207
+ parser.add_argument(
208
+ "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
209
+ )
210
+ parser.add_argument(
211
+ "--entity_vocab_path",
212
+ default=None,
213
+ type=str,
214
+ help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
215
+ )
216
+ parser.add_argument(
217
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
218
+ )
219
+ parser.add_argument(
220
+ "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
221
+ )
222
+ args = parser.parse_args()
223
+ convert_luke_checkpoint(
224
+ args.checkpoint_path,
225
+ args.metadata_path,
226
+ args.entity_vocab_path,
227
+ args.pytorch_dump_folder_path,
228
+ args.model_size,
229
+ )
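For reference, the conversion entry point above can also be driven directly from Python. A minimal sketch; the paths are placeholders for artifacts produced by the original mLUKE training code, not files from this diff:

from transformers.models.mluke.convert_mluke_original_pytorch_checkpoint_to_pytorch import (
    convert_luke_checkpoint,
)

# Placeholder paths; substitute the real checkpoint, metadata and entity vocab files.
convert_luke_checkpoint(
    checkpoint_path="original_mluke/pytorch_model.bin",
    metadata_path="original_mluke/metadata.json",
    entity_vocab_path="original_mluke/entity_vocab.jsonl",
    pytorch_dump_folder_path="converted_mluke_base",
    model_size="base",
)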
llmeval-env/lib/python3.10/site-packages/transformers/models/mluke/tokenization_mluke.py ADDED
@@ -0,0 +1,1614 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Studio Ousia and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for mLUKE."""
16
+
17
+
18
+ import itertools
19
+ import json
20
+ import os
21
+ from collections.abc import Mapping
22
+ from shutil import copyfile
23
+ from typing import Any, Dict, List, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import sentencepiece as spm
27
+
28
+ from ...tokenization_utils import PreTrainedTokenizer
29
+ from ...tokenization_utils_base import (
30
+ ENCODE_KWARGS_DOCSTRING,
31
+ AddedToken,
32
+ BatchEncoding,
33
+ EncodedInput,
34
+ PaddingStrategy,
35
+ TensorType,
36
+ TextInput,
37
+ TextInputPair,
38
+ TruncationStrategy,
39
+ to_py_obj,
40
+ )
41
+ from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ EntitySpan = Tuple[int, int]
47
+ EntitySpanInput = List[EntitySpan]
48
+ Entity = str
49
+ EntityInput = List[Entity]
50
+
51
+ SPIECE_UNDERLINE = "▁"
52
+
53
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "entity_vocab_file": "entity_vocab.json"}
54
+
55
+
56
+ ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
57
+ return_token_type_ids (`bool`, *optional*):
58
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
59
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
60
+
61
+ [What are token type IDs?](../glossary#token-type-ids)
62
+ return_attention_mask (`bool`, *optional*):
63
+ Whether to return the attention mask. If left to the default, will return the attention mask according
64
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
65
+
66
+ [What are attention masks?](../glossary#attention-mask)
67
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
68
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
69
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
70
+ of returning overflowing tokens.
71
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
72
+ Whether or not to return special tokens mask information.
73
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
74
+ Whether or not to return `(char_start, char_end)` for each token.
75
+
76
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
78
+ Python's tokenizer, this method will raise `NotImplementedError`.
78
+ return_length (`bool`, *optional*, defaults to `False`):
79
+ Whether or not to return the lengths of the encoded inputs.
80
+ verbose (`bool`, *optional*, defaults to `True`):
81
+ Whether or not to print more information and warnings.
82
+ **kwargs: passed to the `self.tokenize()` method
83
+
84
+ Return:
85
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
86
+
87
+ - **input_ids** -- List of token ids to be fed to a model.
88
+
89
+ [What are input IDs?](../glossary#input-ids)
90
+
91
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
92
+ if *"token_type_ids"* is in `self.model_input_names`).
93
+
94
+ [What are token type IDs?](../glossary#token-type-ids)
95
+
96
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
97
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
98
+
99
+ [What are attention masks?](../glossary#attention-mask)
100
+
101
+ - **entity_ids** -- List of entity ids to be fed to a model.
102
+
103
+ [What are input IDs?](../glossary#input-ids)
104
+
105
+ - **entity_position_ids** -- List of entity positions in the input sequence to be fed to a model.
106
+
107
+ - **entity_token_type_ids** -- List of entity token type ids to be fed to a model (when
108
+ `return_token_type_ids=True` or if *"entity_token_type_ids"* is in `self.model_input_names`).
109
+
110
+ [What are token type IDs?](../glossary#token-type-ids)
111
+
112
+ - **entity_attention_mask** -- List of indices specifying which entities should be attended to by the model
113
+ (when `return_attention_mask=True` or if *"entity_attention_mask"* is in `self.model_input_names`).
114
+
115
+ [What are attention masks?](../glossary#attention-mask)
116
+
117
+ - **entity_start_positions** -- List of the start positions of entities in the word token sequence (when
118
+ `task="entity_span_classification"`).
119
+ - **entity_end_positions** -- List of the end positions of entities in the word token sequence (when
120
+ `task="entity_span_classification"`).
121
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
122
+ `return_overflowing_tokens=True`).
123
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
124
+ `return_overflowing_tokens=True`).
125
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
126
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
127
+ - **length** -- The length of the inputs (when `return_length=True`)
128
+
129
+ """
130
+
131
+
132
+ class MLukeTokenizer(PreTrainedTokenizer):
133
+ """
134
+ Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on
135
+ [SentencePiece](https://github.com/google/sentencepiece).
136
+
137
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
138
+ this superclass for more information regarding those methods.
139
+
140
+ Args:
141
+ vocab_file (`str`):
142
+ Path to the vocabulary file.
143
+ entity_vocab_file (`str`):
144
+ Path to the entity vocabulary file.
145
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
146
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
147
+
148
+ <Tip>
149
+
150
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
151
+ sequence. The token used is the `cls_token`.
152
+
153
+ </Tip>
154
+
155
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
156
+ The end of sequence token.
157
+
158
+ <Tip>
159
+
160
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
161
+ The token used is the `sep_token`.
162
+
163
+ </Tip>
164
+
165
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
166
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
167
+ sequence classification or for a text and a question for question answering. It is also used as the last
168
+ token of a sequence built with special tokens.
169
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
170
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
171
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
172
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
173
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
174
+ token instead.
175
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
176
+ The token used for padding, for example when batching sequences of different lengths.
177
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
178
+ The token used for masking values. This is the token used when training this model with masked language
179
+ modeling. This is the token which the model will try to predict.
180
+ task (`str`, *optional*):
181
+ Task for which you want to prepare sequences. One of `"entity_classification"`,
182
+ `"entity_pair_classification"`, or `"entity_span_classification"`. If you specify this argument, the entity
183
+ sequence is automatically created based on the given entity span(s).
184
+ max_entity_length (`int`, *optional*, defaults to 32):
185
+ The maximum length of `entity_ids`.
186
+ max_mention_length (`int`, *optional*, defaults to 30):
187
+ The maximum number of tokens inside an entity span.
188
+ entity_token_1 (`str`, *optional*, defaults to `<ent>`):
189
+ The special token used to represent an entity span in a word token sequence. This token is only used when
190
+ `task` is set to `"entity_classification"` or `"entity_pair_classification"`.
191
+ entity_token_2 (`str`, *optional*, defaults to `<ent2>`):
192
+ The special token used to represent an entity span in a word token sequence. This token is only used when
193
+ `task` is set to `"entity_pair_classification"`.
194
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
195
+ Additional special tokens used by the tokenizer.
196
+ sp_model_kwargs (`dict`, *optional*):
197
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
198
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
199
+ to set:
200
+
201
+ - `enable_sampling`: Enable subword regularization.
202
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
203
+
204
+ - `nbest_size = {0,1}`: No sampling is performed.
205
+ - `nbest_size > 1`: samples from the nbest_size results.
206
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
207
+ using the forward-filtering-and-backward-sampling algorithm.
208
+
209
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
210
+ BPE-dropout.
211
+
212
+ Attributes:
213
+ sp_model (`SentencePieceProcessor`):
214
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
215
+ """
216
+
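+ # Usage sketch (illustrative only; it mirrors the checks run in the mLUKE conversion script above rather
+ # than documenting additional guarantees):
+ #
+ #     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base", task="entity_classification")
+ #     encoding = tokenizer("ISO 639-3 uses the code fas for the dialects spoken across Iran.",
+ #                          entity_spans=[(0, 9)], return_tensors="pt")
+ #
+ # Besides input_ids and attention_mask, `encoding` then carries entity_ids, entity_position_ids and
+ # entity_attention_mask, as listed in the docstring above.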
217
+ vocab_files_names = VOCAB_FILES_NAMES
218
+ model_input_names = ["input_ids", "attention_mask"]
219
+
220
+ def __init__(
221
+ self,
222
+ vocab_file,
223
+ entity_vocab_file,
224
+ bos_token="<s>",
225
+ eos_token="</s>",
226
+ sep_token="</s>",
227
+ cls_token="<s>",
228
+ unk_token="<unk>",
229
+ pad_token="<pad>",
230
+ mask_token="<mask>",
231
+ task=None,
232
+ max_entity_length=32,
233
+ max_mention_length=30,
234
+ entity_token_1="<ent>",
235
+ entity_token_2="<ent2>",
236
+ entity_unk_token="[UNK]",
237
+ entity_pad_token="[PAD]",
238
+ entity_mask_token="[MASK]",
239
+ entity_mask2_token="[MASK2]",
240
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
241
+ **kwargs,
242
+ ) -> None:
243
+ # Mask token behaves like a normal word, i.e. it includes the space before it
244
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
245
+
246
+ # we add 2 special tokens for downstream tasks
247
+ # for more information about lstrip and rstrip, see https://github.com/huggingface/transformers/pull/2778
248
+ entity_token_1 = (
249
+ AddedToken(entity_token_1, lstrip=False, rstrip=False)
250
+ if isinstance(entity_token_1, str)
251
+ else entity_token_1
252
+ )
253
+ entity_token_2 = (
254
+ AddedToken(entity_token_2, lstrip=False, rstrip=False)
255
+ if isinstance(entity_token_2, str)
256
+ else entity_token_2
257
+ )
258
+ additional_special_tokens = kwargs.pop("additional_special_tokens", [])
259
+ additional_special_tokens += [entity_token_1, entity_token_2]
260
+
261
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
262
+
263
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
264
+ self.sp_model.Load(str(vocab_file))
265
+ self.vocab_file = vocab_file
266
+
267
+ # Original fairseq vocab and spm vocab must be "aligned":
268
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
269
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
270
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
271
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
272
+
273
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
274
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
275
+
276
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
277
+ self.fairseq_offset = 1
278
+
279
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
280
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
281
+
282
+ with open(entity_vocab_file, encoding="utf-8") as entity_vocab_handle:
283
+ self.entity_vocab = json.load(entity_vocab_handle)
284
+ for entity_special_token in [entity_unk_token, entity_pad_token, entity_mask_token, entity_mask2_token]:
285
+ if entity_special_token not in self.entity_vocab:
286
+ raise ValueError(
287
+ f"Specified entity special token ``{entity_special_token}`` is not found in entity_vocab. "
288
+ f"Probably an incorrect entity vocab file is loaded: {entity_vocab_file}."
289
+ )
290
+ self.entity_unk_token_id = self.entity_vocab[entity_unk_token]
291
+ self.entity_pad_token_id = self.entity_vocab[entity_pad_token]
292
+ self.entity_mask_token_id = self.entity_vocab[entity_mask_token]
293
+ self.entity_mask2_token_id = self.entity_vocab[entity_mask2_token]
294
+
295
+ self.task = task
296
+ if task is None or task == "entity_span_classification":
297
+ self.max_entity_length = max_entity_length
298
+ elif task == "entity_classification":
299
+ self.max_entity_length = 1
300
+ elif task == "entity_pair_classification":
301
+ self.max_entity_length = 2
302
+ else:
303
+ raise ValueError(
304
+ f"Task {task} not supported. Select task from ['entity_classification', 'entity_pair_classification',"
305
+ " 'entity_span_classification'] only."
306
+ )
307
+
308
+ self.max_mention_length = max_mention_length
309
+
310
+ super().__init__(
311
+ bos_token=bos_token,
312
+ eos_token=eos_token,
313
+ unk_token=unk_token,
314
+ sep_token=sep_token,
315
+ cls_token=cls_token,
316
+ pad_token=pad_token,
317
+ mask_token=mask_token,
318
+ sp_model_kwargs=self.sp_model_kwargs,
319
+ task=task,
320
+ max_entity_length=max_entity_length,
321
+ max_mention_length=max_mention_length,
322
+ entity_token_1=entity_token_1,
323
+ entity_token_2=entity_token_2,
324
+ entity_unk_token=entity_unk_token,
325
+ entity_pad_token=entity_pad_token,
326
+ entity_mask_token=entity_mask_token,
327
+ entity_mask2_token=entity_mask2_token,
328
+ additional_special_tokens=additional_special_tokens,
329
+ **kwargs,
330
+ )
331
+
332
+ @property
333
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.vocab_size
334
+ def vocab_size(self):
335
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
336
+
337
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_vocab
338
+ def get_vocab(self):
339
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
340
+ vocab.update(self.added_tokens_encoder)
341
+ return vocab
342
+
343
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._tokenize
344
+ def _tokenize(self, text: str) -> List[str]:
345
+ # TODO check if the t5/llama PR also applies here
346
+ return self.sp_model.encode(text, out_type=str)
347
+
348
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer._convert_token_to_id
349
+ def _convert_token_to_id(self, token):
350
+ """Converts a token (str) in an id using the vocab."""
351
+ if token in self.fairseq_tokens_to_ids:
352
+ return self.fairseq_tokens_to_ids[token]
353
+ spm_id = self.sp_model.PieceToId(token)
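+ # e.g. the spm vocab maps "," to id 3 (see the alignment table in __init__), so adding self.fairseq_offset
+ # below yields 4, which is the id of "," in the fairseq-aligned Transformers vocab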
354
+
355
+ # Need to return unknown token if the SP model returned 0
356
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
357
+
358
+ def _convert_id_to_token(self, index):
359
+ """Converts an index (integer) in a token (str) using the vocab."""
360
+ if index in self.fairseq_ids_to_tokens:
361
+ return self.fairseq_ids_to_tokens[index]
362
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
363
+
364
+ def convert_tokens_to_string(self, tokens):
365
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
366
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
367
+ return out_string
368
+
369
+ def __getstate__(self):
370
+ state = self.__dict__.copy()
371
+ state["sp_model"] = None
372
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
373
+ return state
374
+
375
+ def __setstate__(self, d):
376
+ self.__dict__ = d
377
+
378
+ # for backward compatibility
379
+ if not hasattr(self, "sp_model_kwargs"):
380
+ self.sp_model_kwargs = {}
381
+
382
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
383
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
384
+
385
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
386
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.__call__
387
+ def __call__(
388
+ self,
389
+ text: Union[TextInput, List[TextInput]],
390
+ text_pair: Optional[Union[TextInput, List[TextInput]]] = None,
391
+ entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
392
+ entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]] = None,
393
+ entities: Optional[Union[EntityInput, List[EntityInput]]] = None,
394
+ entities_pair: Optional[Union[EntityInput, List[EntityInput]]] = None,
395
+ add_special_tokens: bool = True,
396
+ padding: Union[bool, str, PaddingStrategy] = False,
397
+ truncation: Union[bool, str, TruncationStrategy] = None,
398
+ max_length: Optional[int] = None,
399
+ max_entity_length: Optional[int] = None,
400
+ stride: int = 0,
401
+ is_split_into_words: Optional[bool] = False,
402
+ pad_to_multiple_of: Optional[int] = None,
403
+ return_tensors: Optional[Union[str, TensorType]] = None,
404
+ return_token_type_ids: Optional[bool] = None,
405
+ return_attention_mask: Optional[bool] = None,
406
+ return_overflowing_tokens: bool = False,
407
+ return_special_tokens_mask: bool = False,
408
+ return_offsets_mapping: bool = False,
409
+ return_length: bool = False,
410
+ verbose: bool = True,
411
+ **kwargs,
412
+ ) -> BatchEncoding:
413
+ """
414
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
415
+ sequences, depending on the task you want to prepare them for.
416
+
417
+ Args:
418
+ text (`str`, `List[str]`, `List[List[str]]`):
419
+ The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
420
+ tokenizer does not support tokenization based on pretokenized strings.
421
+ text_pair (`str`, `List[str]`, `List[List[str]]`):
422
+ The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this
423
+ tokenizer does not support tokenization based on pretokenized strings.
424
+ entity_spans (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):
425
+ The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
426
+ with two integers denoting character-based start and end positions of entities. If you specify
427
+ `"entity_classification"` or `"entity_pair_classification"` as the `task` argument in the constructor,
428
+ the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each
429
+ sequence must be equal to the length of each sequence of `entities`.
430
+ entity_spans_pair (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):
431
+ The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each
432
+ with two integers denoting character-based start and end positions of entities. If you specify the
433
+ `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the
434
+ length of each sequence must be equal to the length of each sequence of `entities_pair`.
435
+ entities (`List[str]`, `List[List[str]]`, *optional*):
436
+ The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
437
+ representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
438
+ Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
439
+ each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
440
+ `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
441
+ is automatically constructed by filling it with the [MASK] entity.
442
+ entities_pair (`List[str]`, `List[List[str]]`, *optional*):
443
+ The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
444
+ representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
445
+ Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
446
+ each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
447
+ `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
448
+ sequences is automatically constructed by filling it with the [MASK] entity.
449
+ max_entity_length (`int`, *optional*):
450
+ The maximum length of `entity_ids`.
451
+ """
452
+ # Input type checking for clearer error
453
+ is_valid_single_text = isinstance(text, str)
454
+ is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or (isinstance(text[0], str)))
455
+ if not (is_valid_single_text or is_valid_batch_text):
456
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch).")
457
+
458
+ is_valid_single_text_pair = isinstance(text_pair, str)
459
+ is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (
460
+ len(text_pair) == 0 or isinstance(text_pair[0], str)
461
+ )
462
+ if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):
463
+ raise ValueError("text_pair input must be of type `str` (single example) or `List[str]` (batch).")
464
+
465
+ is_batched = bool(isinstance(text, (list, tuple)))
466
+
467
+ if is_batched:
468
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
469
+ if entities is None:
470
+ batch_entities_or_entities_pairs = None
471
+ else:
472
+ batch_entities_or_entities_pairs = (
473
+ list(zip(entities, entities_pair)) if entities_pair is not None else entities
474
+ )
475
+
476
+ if entity_spans is None:
477
+ batch_entity_spans_or_entity_spans_pairs = None
478
+ else:
479
+ batch_entity_spans_or_entity_spans_pairs = (
480
+ list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans
481
+ )
482
+
483
+ return self.batch_encode_plus(
484
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
485
+ batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs,
486
+ batch_entities_or_entities_pairs=batch_entities_or_entities_pairs,
487
+ add_special_tokens=add_special_tokens,
488
+ padding=padding,
489
+ truncation=truncation,
490
+ max_length=max_length,
491
+ max_entity_length=max_entity_length,
492
+ stride=stride,
493
+ is_split_into_words=is_split_into_words,
494
+ pad_to_multiple_of=pad_to_multiple_of,
495
+ return_tensors=return_tensors,
496
+ return_token_type_ids=return_token_type_ids,
497
+ return_attention_mask=return_attention_mask,
498
+ return_overflowing_tokens=return_overflowing_tokens,
499
+ return_special_tokens_mask=return_special_tokens_mask,
500
+ return_offsets_mapping=return_offsets_mapping,
501
+ return_length=return_length,
502
+ verbose=verbose,
503
+ **kwargs,
504
+ )
505
+ else:
506
+ return self.encode_plus(
507
+ text=text,
508
+ text_pair=text_pair,
509
+ entity_spans=entity_spans,
510
+ entity_spans_pair=entity_spans_pair,
511
+ entities=entities,
512
+ entities_pair=entities_pair,
513
+ add_special_tokens=add_special_tokens,
514
+ padding=padding,
515
+ truncation=truncation,
516
+ max_length=max_length,
517
+ max_entity_length=max_entity_length,
518
+ stride=stride,
519
+ is_split_into_words=is_split_into_words,
520
+ pad_to_multiple_of=pad_to_multiple_of,
521
+ return_tensors=return_tensors,
522
+ return_token_type_ids=return_token_type_ids,
523
+ return_attention_mask=return_attention_mask,
524
+ return_overflowing_tokens=return_overflowing_tokens,
525
+ return_special_tokens_mask=return_special_tokens_mask,
526
+ return_offsets_mapping=return_offsets_mapping,
527
+ return_length=return_length,
528
+ verbose=verbose,
529
+ **kwargs,
530
+ )
531
+
532
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._encode_plus
533
+ def _encode_plus(
534
+ self,
535
+ text: Union[TextInput],
536
+ text_pair: Optional[Union[TextInput]] = None,
537
+ entity_spans: Optional[EntitySpanInput] = None,
538
+ entity_spans_pair: Optional[EntitySpanInput] = None,
539
+ entities: Optional[EntityInput] = None,
540
+ entities_pair: Optional[EntityInput] = None,
541
+ add_special_tokens: bool = True,
542
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
543
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
544
+ max_length: Optional[int] = None,
545
+ max_entity_length: Optional[int] = None,
546
+ stride: int = 0,
547
+ is_split_into_words: Optional[bool] = False,
548
+ pad_to_multiple_of: Optional[int] = None,
549
+ return_tensors: Optional[Union[str, TensorType]] = None,
550
+ return_token_type_ids: Optional[bool] = None,
551
+ return_attention_mask: Optional[bool] = None,
552
+ return_overflowing_tokens: bool = False,
553
+ return_special_tokens_mask: bool = False,
554
+ return_offsets_mapping: bool = False,
555
+ return_length: bool = False,
556
+ verbose: bool = True,
557
+ **kwargs,
558
+ ) -> BatchEncoding:
559
+ if return_offsets_mapping:
560
+ raise NotImplementedError(
561
+ "return_offset_mapping is not available when using Python tokenizers. "
562
+ "To use this feature, change your tokenizer to one deriving from "
563
+ "transformers.PreTrainedTokenizerFast. "
564
+ "More information on available tokenizers at "
565
+ "https://github.com/huggingface/transformers/pull/2674"
566
+ )
567
+
568
+ if is_split_into_words:
569
+ raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
570
+
571
+ (
572
+ first_ids,
573
+ second_ids,
574
+ first_entity_ids,
575
+ second_entity_ids,
576
+ first_entity_token_spans,
577
+ second_entity_token_spans,
578
+ ) = self._create_input_sequence(
579
+ text=text,
580
+ text_pair=text_pair,
581
+ entities=entities,
582
+ entities_pair=entities_pair,
583
+ entity_spans=entity_spans,
584
+ entity_spans_pair=entity_spans_pair,
585
+ **kwargs,
586
+ )
587
+
588
+ # prepare_for_model will create the attention_mask and token_type_ids
589
+ return self.prepare_for_model(
590
+ first_ids,
591
+ pair_ids=second_ids,
592
+ entity_ids=first_entity_ids,
593
+ pair_entity_ids=second_entity_ids,
594
+ entity_token_spans=first_entity_token_spans,
595
+ pair_entity_token_spans=second_entity_token_spans,
596
+ add_special_tokens=add_special_tokens,
597
+ padding=padding_strategy.value,
598
+ truncation=truncation_strategy.value,
599
+ max_length=max_length,
600
+ max_entity_length=max_entity_length,
601
+ stride=stride,
602
+ pad_to_multiple_of=pad_to_multiple_of,
603
+ return_tensors=return_tensors,
604
+ prepend_batch_axis=True,
605
+ return_attention_mask=return_attention_mask,
606
+ return_token_type_ids=return_token_type_ids,
607
+ return_overflowing_tokens=return_overflowing_tokens,
608
+ return_special_tokens_mask=return_special_tokens_mask,
609
+ return_length=return_length,
610
+ verbose=verbose,
611
+ )
612
+
613
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_encode_plus
614
+ def _batch_encode_plus(
615
+ self,
616
+ batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair]],
617
+ batch_entity_spans_or_entity_spans_pairs: Optional[
618
+ Union[List[EntitySpanInput], List[Tuple[EntitySpanInput, EntitySpanInput]]]
619
+ ] = None,
620
+ batch_entities_or_entities_pairs: Optional[
621
+ Union[List[EntityInput], List[Tuple[EntityInput, EntityInput]]]
622
+ ] = None,
623
+ add_special_tokens: bool = True,
624
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
625
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
626
+ max_length: Optional[int] = None,
627
+ max_entity_length: Optional[int] = None,
628
+ stride: int = 0,
629
+ is_split_into_words: Optional[bool] = False,
630
+ pad_to_multiple_of: Optional[int] = None,
631
+ return_tensors: Optional[Union[str, TensorType]] = None,
632
+ return_token_type_ids: Optional[bool] = None,
633
+ return_attention_mask: Optional[bool] = None,
634
+ return_overflowing_tokens: bool = False,
635
+ return_special_tokens_mask: bool = False,
636
+ return_offsets_mapping: bool = False,
637
+ return_length: bool = False,
638
+ verbose: bool = True,
639
+ **kwargs,
640
+ ) -> BatchEncoding:
641
+ if return_offsets_mapping:
642
+ raise NotImplementedError(
643
+ "return_offset_mapping is not available when using Python tokenizers. "
644
+ "To use this feature, change your tokenizer to one deriving from "
645
+ "transformers.PreTrainedTokenizerFast."
646
+ )
647
+
648
+ if is_split_into_words:
649
+ raise NotImplementedError("is_split_into_words is not supported in this tokenizer.")
650
+
651
+ # input_ids is a list of tuples (one for each example in the batch)
652
+ input_ids = []
653
+ entity_ids = []
654
+ entity_token_spans = []
655
+ for index, text_or_text_pair in enumerate(batch_text_or_text_pairs):
656
+ if not isinstance(text_or_text_pair, (list, tuple)):
657
+ text, text_pair = text_or_text_pair, None
658
+ else:
659
+ text, text_pair = text_or_text_pair
660
+
661
+ entities, entities_pair = None, None
662
+ if batch_entities_or_entities_pairs is not None:
663
+ entities_or_entities_pairs = batch_entities_or_entities_pairs[index]
664
+ if entities_or_entities_pairs:
665
+ if isinstance(entities_or_entities_pairs[0], str):
666
+ entities, entities_pair = entities_or_entities_pairs, None
667
+ else:
668
+ entities, entities_pair = entities_or_entities_pairs
669
+
670
+ entity_spans, entity_spans_pair = None, None
671
+ if batch_entity_spans_or_entity_spans_pairs is not None:
672
+ entity_spans_or_entity_spans_pairs = batch_entity_spans_or_entity_spans_pairs[index]
673
+ if len(entity_spans_or_entity_spans_pairs) > 0 and isinstance(
674
+ entity_spans_or_entity_spans_pairs[0], list
675
+ ):
676
+ entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs
677
+ else:
678
+ entity_spans, entity_spans_pair = entity_spans_or_entity_spans_pairs, None
679
+
680
+ (
681
+ first_ids,
682
+ second_ids,
683
+ first_entity_ids,
684
+ second_entity_ids,
685
+ first_entity_token_spans,
686
+ second_entity_token_spans,
687
+ ) = self._create_input_sequence(
688
+ text=text,
689
+ text_pair=text_pair,
690
+ entities=entities,
691
+ entities_pair=entities_pair,
692
+ entity_spans=entity_spans,
693
+ entity_spans_pair=entity_spans_pair,
694
+ **kwargs,
695
+ )
696
+ input_ids.append((first_ids, second_ids))
697
+ entity_ids.append((first_entity_ids, second_entity_ids))
698
+ entity_token_spans.append((first_entity_token_spans, second_entity_token_spans))
699
+
700
+ batch_outputs = self._batch_prepare_for_model(
701
+ input_ids,
702
+ batch_entity_ids_pairs=entity_ids,
703
+ batch_entity_token_spans_pairs=entity_token_spans,
704
+ add_special_tokens=add_special_tokens,
705
+ padding_strategy=padding_strategy,
706
+ truncation_strategy=truncation_strategy,
707
+ max_length=max_length,
708
+ max_entity_length=max_entity_length,
709
+ stride=stride,
710
+ pad_to_multiple_of=pad_to_multiple_of,
711
+ return_attention_mask=return_attention_mask,
712
+ return_token_type_ids=return_token_type_ids,
713
+ return_overflowing_tokens=return_overflowing_tokens,
714
+ return_special_tokens_mask=return_special_tokens_mask,
715
+ return_length=return_length,
716
+ return_tensors=return_tensors,
717
+ verbose=verbose,
718
+ )
719
+
720
+ return BatchEncoding(batch_outputs)
721
+
722
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._check_entity_input_format
723
+ def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
724
+ if not isinstance(entity_spans, list):
725
+ raise ValueError("entity_spans should be given as a list")
726
+ elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
727
+ raise ValueError(
728
+ "entity_spans should be given as a list of tuples containing the start and end character indices"
729
+ )
730
+
731
+ if entities is not None:
732
+ if not isinstance(entities, list):
733
+ raise ValueError("If you specify entities, they should be given as a list")
734
+
735
+ if len(entities) > 0 and not isinstance(entities[0], str):
736
+ raise ValueError("If you specify entities, they should be given as a list of entity names")
737
+
738
+ if len(entities) != len(entity_spans):
739
+ raise ValueError("If you specify entities, entities and entity_spans must be the same length")
740
+
741
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._create_input_sequence
742
+ def _create_input_sequence(
743
+ self,
744
+ text: Union[TextInput],
745
+ text_pair: Optional[Union[TextInput]] = None,
746
+ entities: Optional[EntityInput] = None,
747
+ entities_pair: Optional[EntityInput] = None,
748
+ entity_spans: Optional[EntitySpanInput] = None,
749
+ entity_spans_pair: Optional[EntitySpanInput] = None,
750
+ **kwargs,
751
+ ) -> Tuple[list, list, list, list, list, list]:
752
+ def get_input_ids(text):
753
+ tokens = self.tokenize(text, **kwargs)
754
+ return self.convert_tokens_to_ids(tokens)
755
+
756
+ def get_input_ids_and_entity_token_spans(text, entity_spans):
757
+ if entity_spans is None:
758
+ return get_input_ids(text), None
759
+
760
+ cur = 0
761
+ input_ids = []
762
+ entity_token_spans = [None] * len(entity_spans)
763
+
764
+ split_char_positions = sorted(frozenset(itertools.chain(*entity_spans)))
765
+ char_pos2token_pos = {}
766
+
767
+ for split_char_position in split_char_positions:
768
+ orig_split_char_position = split_char_position
769
+ if (
770
+ split_char_position > 0 and text[split_char_position - 1] == " "
771
+ ): # whitespace should be prepended to the following token
772
+ split_char_position -= 1
773
+ if cur != split_char_position:
774
+ input_ids += get_input_ids(text[cur:split_char_position])
775
+ cur = split_char_position
776
+ char_pos2token_pos[orig_split_char_position] = len(input_ids)
777
+
778
+ input_ids += get_input_ids(text[cur:])
779
+
780
+ entity_token_spans = [
781
+ (char_pos2token_pos[char_start], char_pos2token_pos[char_end]) for char_start, char_end in entity_spans
782
+ ]
783
+
784
+ return input_ids, entity_token_spans
785
+
786
+ first_ids, second_ids = None, None
787
+ first_entity_ids, second_entity_ids = None, None
788
+ first_entity_token_spans, second_entity_token_spans = None, None
789
+
790
+ if self.task is None:
791
+ if entity_spans is None:
792
+ first_ids = get_input_ids(text)
793
+ else:
794
+ self._check_entity_input_format(entities, entity_spans)
795
+
796
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
797
+ if entities is None:
798
+ first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
799
+ else:
800
+ first_entity_ids = [self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities]
801
+
802
+ if text_pair is not None:
803
+ if entity_spans_pair is None:
804
+ second_ids = get_input_ids(text_pair)
805
+ else:
806
+ self._check_entity_input_format(entities_pair, entity_spans_pair)
807
+
808
+ second_ids, second_entity_token_spans = get_input_ids_and_entity_token_spans(
809
+ text_pair, entity_spans_pair
810
+ )
811
+ if entities_pair is None:
812
+ second_entity_ids = [self.entity_mask_token_id] * len(entity_spans_pair)
813
+ else:
814
+ second_entity_ids = [
815
+ self.entity_vocab.get(entity, self.entity_unk_token_id) for entity in entities_pair
816
+ ]
817
+
818
+ elif self.task == "entity_classification":
819
+ if not (isinstance(entity_spans, list) and len(entity_spans) == 1 and isinstance(entity_spans[0], tuple)):
820
+ raise ValueError(
821
+ "Entity spans should be a list containing a single tuple "
822
+ "containing the start and end character indices of an entity"
823
+ )
824
+ first_entity_ids = [self.entity_mask_token_id]
825
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
826
+
827
+ # add special tokens to input ids
828
+ entity_token_start, entity_token_end = first_entity_token_spans[0]
829
+ first_ids = (
830
+ first_ids[:entity_token_end] + [self.additional_special_tokens_ids[0]] + first_ids[entity_token_end:]
831
+ )
832
+ first_ids = (
833
+ first_ids[:entity_token_start]
834
+ + [self.additional_special_tokens_ids[0]]
835
+ + first_ids[entity_token_start:]
836
+ )
837
+ first_entity_token_spans = [(entity_token_start, entity_token_end + 2)]
838
+
839
+ elif self.task == "entity_pair_classification":
840
+ if not (
841
+ isinstance(entity_spans, list)
842
+ and len(entity_spans) == 2
843
+ and isinstance(entity_spans[0], tuple)
844
+ and isinstance(entity_spans[1], tuple)
845
+ ):
846
+ raise ValueError(
847
+ "Entity spans should be provided as a list of two tuples, "
848
+ "each tuple containing the start and end character indices of an entity"
849
+ )
850
+
851
+ head_span, tail_span = entity_spans
852
+ first_entity_ids = [self.entity_mask_token_id, self.entity_mask2_token_id]
853
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
854
+
855
+ head_token_span, tail_token_span = first_entity_token_spans
856
+ token_span_with_special_token_ids = [
857
+ (head_token_span, self.additional_special_tokens_ids[0]),
858
+ (tail_token_span, self.additional_special_tokens_ids[1]),
859
+ ]
860
+ if head_token_span[0] < tail_token_span[0]:
861
+ first_entity_token_spans[0] = (head_token_span[0], head_token_span[1] + 2)
862
+ first_entity_token_spans[1] = (tail_token_span[0] + 2, tail_token_span[1] + 4)
863
+ token_span_with_special_token_ids = reversed(token_span_with_special_token_ids)
864
+ else:
865
+ first_entity_token_spans[0] = (head_token_span[0] + 2, head_token_span[1] + 4)
866
+ first_entity_token_spans[1] = (tail_token_span[0], tail_token_span[1] + 2)
867
+
868
+ for (entity_token_start, entity_token_end), special_token_id in token_span_with_special_token_ids:
869
+ first_ids = first_ids[:entity_token_end] + [special_token_id] + first_ids[entity_token_end:]
870
+ first_ids = first_ids[:entity_token_start] + [special_token_id] + first_ids[entity_token_start:]
871
+
872
+ elif self.task == "entity_span_classification":
873
+ if not (isinstance(entity_spans, list) and len(entity_spans) > 0 and isinstance(entity_spans[0], tuple)):
874
+ raise ValueError(
875
+ "Entity spans should be provided as a list of tuples, "
876
+ "each tuple containing the start and end character indices of an entity"
877
+ )
878
+
879
+ first_ids, first_entity_token_spans = get_input_ids_and_entity_token_spans(text, entity_spans)
880
+ first_entity_ids = [self.entity_mask_token_id] * len(entity_spans)
881
+
882
+ else:
883
+ raise ValueError(f"Task {self.task} not supported")
884
+
885
+ return (
886
+ first_ids,
887
+ second_ids,
888
+ first_entity_ids,
889
+ second_entity_ids,
890
+ first_entity_token_spans,
891
+ second_entity_token_spans,
892
+ )
893
+
894
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
895
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._batch_prepare_for_model
896
+ def _batch_prepare_for_model(
897
+ self,
898
+ batch_ids_pairs: List[Tuple[List[int], None]],
899
+ batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]],
900
+ batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]],
901
+ add_special_tokens: bool = True,
902
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
903
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
904
+ max_length: Optional[int] = None,
905
+ max_entity_length: Optional[int] = None,
906
+ stride: int = 0,
907
+ pad_to_multiple_of: Optional[int] = None,
908
+ return_tensors: Optional[str] = None,
909
+ return_token_type_ids: Optional[bool] = None,
910
+ return_attention_mask: Optional[bool] = None,
911
+ return_overflowing_tokens: bool = False,
912
+ return_special_tokens_mask: bool = False,
913
+ return_length: bool = False,
914
+ verbose: bool = True,
915
+ ) -> BatchEncoding:
916
+ """
917
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
919
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
920
+ manages a moving window (with user-defined stride) for overflowing tokens.
920
+
921
+
922
+ Args:
923
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
924
+ batch_entity_ids_pairs: list of entity ids or entity ids pairs
925
+ batch_entity_token_spans_pairs: list of entity spans or entity spans pairs
926
+ max_entity_length: The maximum length of the entity sequence.
927
+ """
928
+
929
+ batch_outputs = {}
930
+ for input_ids, entity_ids, entity_token_span_pairs in zip(
931
+ batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs
932
+ ):
933
+ first_ids, second_ids = input_ids
934
+ first_entity_ids, second_entity_ids = entity_ids
935
+ first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs
936
+ outputs = self.prepare_for_model(
937
+ first_ids,
938
+ second_ids,
939
+ entity_ids=first_entity_ids,
940
+ pair_entity_ids=second_entity_ids,
941
+ entity_token_spans=first_entity_token_spans,
942
+ pair_entity_token_spans=second_entity_token_spans,
943
+ add_special_tokens=add_special_tokens,
944
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
945
+ truncation=truncation_strategy.value,
946
+ max_length=max_length,
947
+ max_entity_length=max_entity_length,
948
+ stride=stride,
949
+ pad_to_multiple_of=None, # we pad in batch afterward
950
+ return_attention_mask=False, # we pad in batch afterward
951
+ return_token_type_ids=return_token_type_ids,
952
+ return_overflowing_tokens=return_overflowing_tokens,
953
+ return_special_tokens_mask=return_special_tokens_mask,
954
+ return_length=return_length,
955
+ return_tensors=None, # We convert the whole batch to tensors at the end
956
+ prepend_batch_axis=False,
957
+ verbose=verbose,
958
+ )
959
+
960
+ for key, value in outputs.items():
961
+ if key not in batch_outputs:
962
+ batch_outputs[key] = []
963
+ batch_outputs[key].append(value)
964
+
965
+ batch_outputs = self.pad(
966
+ batch_outputs,
967
+ padding=padding_strategy.value,
968
+ max_length=max_length,
969
+ pad_to_multiple_of=pad_to_multiple_of,
970
+ return_attention_mask=return_attention_mask,
971
+ )
972
+
973
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
974
+
975
+ return batch_outputs
976
+
977
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
978
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.prepare_for_model
979
+ def prepare_for_model(
980
+ self,
981
+ ids: List[int],
982
+ pair_ids: Optional[List[int]] = None,
983
+ entity_ids: Optional[List[int]] = None,
984
+ pair_entity_ids: Optional[List[int]] = None,
985
+ entity_token_spans: Optional[List[Tuple[int, int]]] = None,
986
+ pair_entity_token_spans: Optional[List[Tuple[int, int]]] = None,
987
+ add_special_tokens: bool = True,
988
+ padding: Union[bool, str, PaddingStrategy] = False,
989
+ truncation: Union[bool, str, TruncationStrategy] = None,
990
+ max_length: Optional[int] = None,
991
+ max_entity_length: Optional[int] = None,
992
+ stride: int = 0,
993
+ pad_to_multiple_of: Optional[int] = None,
994
+ return_tensors: Optional[Union[str, TensorType]] = None,
995
+ return_token_type_ids: Optional[bool] = None,
996
+ return_attention_mask: Optional[bool] = None,
997
+ return_overflowing_tokens: bool = False,
998
+ return_special_tokens_mask: bool = False,
999
+ return_offsets_mapping: bool = False,
1000
+ return_length: bool = False,
1001
+ verbose: bool = True,
1002
+ prepend_batch_axis: bool = False,
1003
+ **kwargs,
1004
+ ) -> BatchEncoding:
1005
+ """
1006
+ Prepares a sequence of input ids, entity ids and entity spans, or a pair of sequences of input ids, entity ids,
1007
+ and entity spans, so that it can be used by the model. It adds special tokens, truncates sequences if overflowing
1008
+ while taking into account the special tokens and manages a moving window (with user defined stride) for
1009
+ overflowing tokens. Please note that for *pair_ids* different from `None` and *truncation_strategy = longest_first*
1010
+ or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an
1011
+ error.
1012
+
1013
+ Args:
1014
+ ids (`List[int]`):
1015
+ Tokenized input ids of the first sequence.
1016
+ pair_ids (`List[int]`, *optional*):
1017
+ Tokenized input ids of the second sequence.
1018
+ entity_ids (`List[int]`, *optional*):
1019
+ Entity ids of the first sequence.
1020
+ pair_entity_ids (`List[int]`, *optional*):
1021
+ Entity ids of the second sequence.
1022
+ entity_token_spans (`List[Tuple[int, int]]`, *optional*):
1023
+ Entity spans of the first sequence.
1024
+ pair_entity_token_spans (`List[Tuple[int, int]]`, *optional*):
1025
+ Entity spans of the second sequence.
1026
+ max_entity_length (`int`, *optional*):
1027
+ The maximum length of the entity sequence.
1028
+ """
1029
+
1030
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
1031
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
1032
+ padding=padding,
1033
+ truncation=truncation,
1034
+ max_length=max_length,
1035
+ pad_to_multiple_of=pad_to_multiple_of,
1036
+ verbose=verbose,
1037
+ **kwargs,
1038
+ )
1039
+
1040
+ # Compute lengths
1041
+ pair = bool(pair_ids is not None)
1042
+ len_ids = len(ids)
1043
+ len_pair_ids = len(pair_ids) if pair else 0
1044
+
1045
+ if return_token_type_ids and not add_special_tokens:
1046
+ raise ValueError(
1047
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1048
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1049
+ "set return_token_type_ids to None."
1050
+ )
1051
+ if (
1052
+ return_overflowing_tokens
1053
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
1054
+ and pair_ids is not None
1055
+ ):
1056
+ raise ValueError(
1057
+ "Not possible to return overflowing tokens for pair of sequences with the "
1058
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
1059
+ "for instance `only_second` or `only_first`."
1060
+ )
1061
+
1062
+ # Load from model defaults
1063
+ if return_token_type_ids is None:
1064
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1065
+ if return_attention_mask is None:
1066
+ return_attention_mask = "attention_mask" in self.model_input_names
1067
+
1068
+ encoded_inputs = {}
1069
+
1070
+ # Compute the total size of the returned word encodings
1071
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
1072
+
1073
+ # Truncation: Handle max sequence length and max_entity_length
1074
+ overflowing_tokens = []
1075
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
1076
+ # truncate words up to max_length
1077
+ ids, pair_ids, overflowing_tokens = self.truncate_sequences(
1078
+ ids,
1079
+ pair_ids=pair_ids,
1080
+ num_tokens_to_remove=total_len - max_length,
1081
+ truncation_strategy=truncation_strategy,
1082
+ stride=stride,
1083
+ )
1084
+
1085
+ if return_overflowing_tokens:
1086
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
1087
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
1088
+
1089
+ # Add special tokens
1090
+ if add_special_tokens:
1091
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1092
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
1093
+ entity_token_offset = 1 # 1 * <s> token
1094
+ pair_entity_token_offset = len(ids) + 3 # 1 * <s> token & 2 * <sep> tokens
1095
+ else:
1096
+ sequence = ids + pair_ids if pair else ids
1097
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
1098
+ entity_token_offset = 0
1099
+ pair_entity_token_offset = len(ids)
1100
+
1101
+ # Build output dictionary
1102
+ encoded_inputs["input_ids"] = sequence
1103
+ if return_token_type_ids:
1104
+ encoded_inputs["token_type_ids"] = token_type_ids
1105
+ if return_special_tokens_mask:
1106
+ if add_special_tokens:
1107
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
1108
+ else:
1109
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1110
+
1111
+ # Set max entity length
1112
+ if not max_entity_length:
1113
+ max_entity_length = self.max_entity_length
1114
+
1115
+ if entity_ids is not None:
1116
+ total_entity_len = 0
1117
+ num_invalid_entities = 0
1118
+ valid_entity_ids = [ent_id for ent_id, span in zip(entity_ids, entity_token_spans) if span[1] <= len(ids)]
1119
+ valid_entity_token_spans = [span for span in entity_token_spans if span[1] <= len(ids)]
1120
+
1121
+ total_entity_len += len(valid_entity_ids)
1122
+ num_invalid_entities += len(entity_ids) - len(valid_entity_ids)
1123
+
1124
+ valid_pair_entity_ids, valid_pair_entity_token_spans = None, None
1125
+ if pair_entity_ids is not None:
1126
+ valid_pair_entity_ids = [
1127
+ ent_id
1128
+ for ent_id, span in zip(pair_entity_ids, pair_entity_token_spans)
1129
+ if span[1] <= len(pair_ids)
1130
+ ]
1131
+ valid_pair_entity_token_spans = [span for span in pair_entity_token_spans if span[1] <= len(pair_ids)]
1132
+ total_entity_len += len(valid_pair_entity_ids)
1133
+ num_invalid_entities += len(pair_entity_ids) - len(valid_pair_entity_ids)
1134
+
1135
+ if num_invalid_entities != 0:
1136
+ logger.warning(
1137
+ f"{num_invalid_entities} entities are ignored because their entity spans are invalid due to the"
1138
+ " truncation of input tokens"
1139
+ )
1140
+
1141
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and total_entity_len > max_entity_length:
1142
+ # truncate entities up to max_entity_length
1143
+ valid_entity_ids, valid_pair_entity_ids, overflowing_entities = self.truncate_sequences(
1144
+ valid_entity_ids,
1145
+ pair_ids=valid_pair_entity_ids,
1146
+ num_tokens_to_remove=total_entity_len - max_entity_length,
1147
+ truncation_strategy=truncation_strategy,
1148
+ stride=stride,
1149
+ )
1150
+ valid_entity_token_spans = valid_entity_token_spans[: len(valid_entity_ids)]
1151
+ if valid_pair_entity_token_spans is not None:
1152
+ valid_pair_entity_token_spans = valid_pair_entity_token_spans[: len(valid_pair_entity_ids)]
1153
+
1154
+ if return_overflowing_tokens:
1155
+ encoded_inputs["overflowing_entities"] = overflowing_entities
1156
+ encoded_inputs["num_truncated_entities"] = total_entity_len - max_entity_length
1157
+
1158
+ final_entity_ids = valid_entity_ids + valid_pair_entity_ids if valid_pair_entity_ids else valid_entity_ids
1159
+ encoded_inputs["entity_ids"] = list(final_entity_ids)
1160
+ entity_position_ids = []
1161
+ entity_start_positions = []
1162
+ entity_end_positions = []
1163
+ for token_spans, offset in (
1164
+ (valid_entity_token_spans, entity_token_offset),
1165
+ (valid_pair_entity_token_spans, pair_entity_token_offset),
1166
+ ):
1167
+ if token_spans is not None:
1168
+ for start, end in token_spans:
1169
+ start += offset
1170
+ end += offset
1171
+ position_ids = list(range(start, end))[: self.max_mention_length]
1172
+ position_ids += [-1] * (self.max_mention_length - end + start)
1173
+ entity_position_ids.append(position_ids)
1174
+ entity_start_positions.append(start)
1175
+ entity_end_positions.append(end - 1)
1176
+
1177
+ encoded_inputs["entity_position_ids"] = entity_position_ids
1178
+ if self.task == "entity_span_classification":
1179
+ encoded_inputs["entity_start_positions"] = entity_start_positions
1180
+ encoded_inputs["entity_end_positions"] = entity_end_positions
1181
+
1182
+ if return_token_type_ids:
1183
+ encoded_inputs["entity_token_type_ids"] = [0] * len(encoded_inputs["entity_ids"])
1184
+
1185
+ # Check lengths
1186
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
1187
+
1188
+ # Padding
1189
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
1190
+ encoded_inputs = self.pad(
1191
+ encoded_inputs,
1192
+ max_length=max_length,
1193
+ max_entity_length=max_entity_length,
1194
+ padding=padding_strategy.value,
1195
+ pad_to_multiple_of=pad_to_multiple_of,
1196
+ return_attention_mask=return_attention_mask,
1197
+ )
1198
+
1199
+ if return_length:
1200
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1201
+
1202
+ batch_outputs = BatchEncoding(
1203
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
1204
+ )
1205
+
1206
+ return batch_outputs
1207
+
1208
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer.pad
1209
+ def pad(
1210
+ self,
1211
+ encoded_inputs: Union[
1212
+ BatchEncoding,
1213
+ List[BatchEncoding],
1214
+ Dict[str, EncodedInput],
1215
+ Dict[str, List[EncodedInput]],
1216
+ List[Dict[str, EncodedInput]],
1217
+ ],
1218
+ padding: Union[bool, str, PaddingStrategy] = True,
1219
+ max_length: Optional[int] = None,
1220
+ max_entity_length: Optional[int] = None,
1221
+ pad_to_multiple_of: Optional[int] = None,
1222
+ return_attention_mask: Optional[bool] = None,
1223
+ return_tensors: Optional[Union[str, TensorType]] = None,
1224
+ verbose: bool = True,
1225
+ ) -> BatchEncoding:
1226
+ """
1227
+ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
1228
+ in the batch. The padding side (left/right) and padding token ids are defined at the tokenizer level (with
1229
+ `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`) .. note:: If the `encoded_inputs` passed
1230
+ are a dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless
1231
+ you provide a different tensor type with `return_tensors`. In the case of PyTorch tensors, you will lose the
1232
+ specific device of your tensors however.
1233
+
1234
+ Args:
1235
+ encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`):
1236
+ Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
1237
+ tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
1238
+ List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
1239
+ collate function. Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or
1240
+ TensorFlow tensors), see the note above for the return type.
1241
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
1242
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
1243
+ index) among:
1244
+
1245
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
1246
+ sequence is provided).
1247
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
1248
+ acceptable input length for the model if that argument is not provided.
1249
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
1250
+ lengths).
1251
+ max_length (`int`, *optional*):
1252
+ Maximum length of the returned list and optionally padding length (see above).
1253
+ max_entity_length (`int`, *optional*):
1254
+ The maximum length of the entity sequence.
1255
+ pad_to_multiple_of (`int`, *optional*):
1256
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
1257
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
1258
+ return_attention_mask (`bool`, *optional*):
1259
+ Whether to return the attention mask. If left to the default, will return the attention mask according
1260
+ to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention
1261
+ masks?](../glossary#attention-mask)
1262
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
1263
+ If set, will return tensors instead of list of python integers. Acceptable values are:
1264
+
1265
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
1266
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
1267
+ - `'np'`: Return Numpy `np.ndarray` objects.
1268
+ verbose (`bool`, *optional*, defaults to `True`):
1269
+ Whether or not to print more information and warnings.
1270
+ """
1271
+ # If we have a list of dicts, let's convert it in a dict of lists
1272
+ # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
1273
+ if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
1274
+ encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
1275
+
1276
+ # The model's main input name, usually `input_ids`, has to be passed for padding
1277
+ if self.model_input_names[0] not in encoded_inputs:
1278
+ raise ValueError(
1279
+ "You should supply an encoding or a list of encodings to this method "
1280
+ f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
1281
+ )
1282
+
1283
+ required_input = encoded_inputs[self.model_input_names[0]]
1284
+
1285
+ if not required_input:
1286
+ if return_attention_mask:
1287
+ encoded_inputs["attention_mask"] = []
1288
+ return encoded_inputs
1289
+
1290
+ # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
1291
+ # and rebuild them afterwards if no return_tensors is specified
1292
+ # Note that we lose the specific device the tensor may be on for PyTorch
1293
+
1294
+ first_element = required_input[0]
1295
+ if isinstance(first_element, (list, tuple)):
1296
+ # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
1297
+ index = 0
1298
+ while len(required_input[index]) == 0:
1299
+ index += 1
1300
+ if index < len(required_input):
1301
+ first_element = required_input[index][0]
1302
+ # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
1303
+ if not isinstance(first_element, (int, list, tuple)):
1304
+ if is_tf_tensor(first_element):
1305
+ return_tensors = "tf" if return_tensors is None else return_tensors
1306
+ elif is_torch_tensor(first_element):
1307
+ return_tensors = "pt" if return_tensors is None else return_tensors
1308
+ elif isinstance(first_element, np.ndarray):
1309
+ return_tensors = "np" if return_tensors is None else return_tensors
1310
+ else:
1311
+ raise ValueError(
1312
+ f"type of {first_element} unknown: {type(first_element)}. "
1313
+ "Should be one of a python, numpy, pytorch or tensorflow object."
1314
+ )
1315
+
1316
+ for key, value in encoded_inputs.items():
1317
+ encoded_inputs[key] = to_py_obj(value)
1318
+
1319
+ # Convert padding_strategy in PaddingStrategy
1320
+ padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
1321
+ padding=padding, max_length=max_length, verbose=verbose
1322
+ )
1323
+
1324
+ if max_entity_length is None:
1325
+ max_entity_length = self.max_entity_length
1326
+
1327
+ required_input = encoded_inputs[self.model_input_names[0]]
1328
+ if required_input and not isinstance(required_input[0], (list, tuple)):
1329
+ encoded_inputs = self._pad(
1330
+ encoded_inputs,
1331
+ max_length=max_length,
1332
+ max_entity_length=max_entity_length,
1333
+ padding_strategy=padding_strategy,
1334
+ pad_to_multiple_of=pad_to_multiple_of,
1335
+ return_attention_mask=return_attention_mask,
1336
+ )
1337
+ return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
1338
+
1339
+ batch_size = len(required_input)
1340
+ if any(len(v) != batch_size for v in encoded_inputs.values()):
1341
+ raise ValueError("Some items in the output dictionary have a different batch size than others.")
1342
+
1343
+ if padding_strategy == PaddingStrategy.LONGEST:
1344
+ max_length = max(len(inputs) for inputs in required_input)
1345
+ max_entity_length = (
1346
+ max(len(inputs) for inputs in encoded_inputs["entity_ids"]) if "entity_ids" in encoded_inputs else 0
1347
+ )
1348
+ padding_strategy = PaddingStrategy.MAX_LENGTH
1349
+
1350
+ batch_outputs = {}
1351
+ for i in range(batch_size):
1352
+ inputs = {k: v[i] for k, v in encoded_inputs.items()}
1353
+ outputs = self._pad(
1354
+ inputs,
1355
+ max_length=max_length,
1356
+ max_entity_length=max_entity_length,
1357
+ padding_strategy=padding_strategy,
1358
+ pad_to_multiple_of=pad_to_multiple_of,
1359
+ return_attention_mask=return_attention_mask,
1360
+ )
1361
+
1362
+ for key, value in outputs.items():
1363
+ if key not in batch_outputs:
1364
+ batch_outputs[key] = []
1365
+ batch_outputs[key].append(value)
1366
+
1367
+ return BatchEncoding(batch_outputs, tensor_type=return_tensors)
1368
+
1369
+ # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._pad
1370
+ def _pad(
1371
+ self,
1372
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1373
+ max_length: Optional[int] = None,
1374
+ max_entity_length: Optional[int] = None,
1375
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1376
+ pad_to_multiple_of: Optional[int] = None,
1377
+ return_attention_mask: Optional[bool] = None,
1378
+ ) -> dict:
1379
+ """
1380
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1381
+
1382
+
1383
+ Args:
1384
+ encoded_inputs:
1385
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1386
+ max_length: maximum length of the returned list and optionally padding length (see below).
1387
+ Will truncate by taking into account the special tokens.
1388
+ max_entity_length: The maximum length of the entity sequence.
1389
+ padding_strategy: PaddingStrategy to use for padding.
1390
+
1391
+
1392
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
1393
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1394
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1395
+ The tokenizer padding sides are defined in self.padding_side:
1396
+
1397
+
1398
+ - 'left': pads on the left of the sequences
1399
+ - 'right': pads on the right of the sequences
1400
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
1401
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1402
+ `>= 7.5` (Volta).
1403
+ return_attention_mask:
1404
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1405
+ """
1406
+ entities_provided = bool("entity_ids" in encoded_inputs)
1407
+
1408
+ # Load from model defaults
1409
+ if return_attention_mask is None:
1410
+ return_attention_mask = "attention_mask" in self.model_input_names
1411
+
1412
+ if padding_strategy == PaddingStrategy.LONGEST:
1413
+ max_length = len(encoded_inputs["input_ids"])
1414
+ if entities_provided:
1415
+ max_entity_length = len(encoded_inputs["entity_ids"])
1416
+
1417
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1418
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1419
+
1420
+ if (
1421
+ entities_provided
1422
+ and max_entity_length is not None
1423
+ and pad_to_multiple_of is not None
1424
+ and (max_entity_length % pad_to_multiple_of != 0)
1425
+ ):
1426
+ max_entity_length = ((max_entity_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1427
+
1428
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (
1429
+ len(encoded_inputs["input_ids"]) != max_length
1430
+ or (entities_provided and len(encoded_inputs["entity_ids"]) != max_entity_length)
1431
+ )
1432
+
1433
+ # Initialize attention mask if not present.
1434
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1435
+ encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
1436
+ if entities_provided and return_attention_mask and "entity_attention_mask" not in encoded_inputs:
1437
+ encoded_inputs["entity_attention_mask"] = [1] * len(encoded_inputs["entity_ids"])
1438
+
1439
+ if needs_to_be_padded:
1440
+ difference = max_length - len(encoded_inputs["input_ids"])
1441
+ if entities_provided:
1442
+ entity_difference = max_entity_length - len(encoded_inputs["entity_ids"])
1443
+ if self.padding_side == "right":
1444
+ if return_attention_mask:
1445
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1446
+ if entities_provided:
1447
+ encoded_inputs["entity_attention_mask"] = (
1448
+ encoded_inputs["entity_attention_mask"] + [0] * entity_difference
1449
+ )
1450
+ if "token_type_ids" in encoded_inputs:
1451
+ encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [0] * difference
1452
+ if entities_provided:
1453
+ encoded_inputs["entity_token_type_ids"] = (
1454
+ encoded_inputs["entity_token_type_ids"] + [0] * entity_difference
1455
+ )
1456
+ if "special_tokens_mask" in encoded_inputs:
1457
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1458
+ encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
1459
+ if entities_provided:
1460
+ encoded_inputs["entity_ids"] = (
1461
+ encoded_inputs["entity_ids"] + [self.entity_pad_token_id] * entity_difference
1462
+ )
1463
+ encoded_inputs["entity_position_ids"] = (
1464
+ encoded_inputs["entity_position_ids"] + [[-1] * self.max_mention_length] * entity_difference
1465
+ )
1466
+ if self.task == "entity_span_classification":
1467
+ encoded_inputs["entity_start_positions"] = (
1468
+ encoded_inputs["entity_start_positions"] + [0] * entity_difference
1469
+ )
1470
+ encoded_inputs["entity_end_positions"] = (
1471
+ encoded_inputs["entity_end_positions"] + [0] * entity_difference
1472
+ )
1473
+
1474
+ elif self.padding_side == "left":
1475
+ if return_attention_mask:
1476
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1477
+ if entities_provided:
1478
+ encoded_inputs["entity_attention_mask"] = [0] * entity_difference + encoded_inputs[
1479
+ "entity_attention_mask"
1480
+ ]
1481
+ if "token_type_ids" in encoded_inputs:
1482
+ encoded_inputs["token_type_ids"] = [0] * difference + encoded_inputs["token_type_ids"]
1483
+ if entities_provided:
1484
+ encoded_inputs["entity_token_type_ids"] = [0] * entity_difference + encoded_inputs[
1485
+ "entity_token_type_ids"
1486
+ ]
1487
+ if "special_tokens_mask" in encoded_inputs:
1488
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1489
+ encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
1490
+ if entities_provided:
1491
+ encoded_inputs["entity_ids"] = [self.entity_pad_token_id] * entity_difference + encoded_inputs[
1492
+ "entity_ids"
1493
+ ]
1494
+ encoded_inputs["entity_position_ids"] = [
1495
+ [-1] * self.max_mention_length
1496
+ ] * entity_difference + encoded_inputs["entity_position_ids"]
1497
+ if self.task == "entity_span_classification":
1498
+ encoded_inputs["entity_start_positions"] = [0] * entity_difference + encoded_inputs[
1499
+ "entity_start_positions"
1500
+ ]
1501
+ encoded_inputs["entity_end_positions"] = [0] * entity_difference + encoded_inputs[
1502
+ "entity_end_positions"
1503
+ ]
1504
+ else:
1505
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1506
+
1507
+ return encoded_inputs
1508
+
1509
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str]:
1510
+ if not os.path.isdir(save_directory):
1511
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
1512
+ return
1513
+
1514
+ out_vocab_file = os.path.join(
1515
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
1516
+ )
1517
+
1518
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
1519
+ copyfile(self.vocab_file, out_vocab_file)
1520
+ elif not os.path.isfile(self.vocab_file):
1521
+ with open(out_vocab_file, "wb") as fi:
1522
+ content_spiece_model = self.sp_model.serialized_model_proto()
1523
+ fi.write(content_spiece_model)
1524
+
1525
+ entity_vocab_file = os.path.join(
1526
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["entity_vocab_file"]
1527
+ )
1528
+
1529
+ with open(entity_vocab_file, "w", encoding="utf-8") as f:
1530
+ f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
1531
+
1532
+ return out_vocab_file, entity_vocab_file
1533
+
1534
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.build_inputs_with_special_tokens
1535
+ def build_inputs_with_special_tokens(
1536
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
1537
+ ) -> List[int]:
1538
+ """
1539
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
1540
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
1541
+
1542
+ - single sequence: `<s> X </s>`
1543
+ - pair of sequences: `<s> A </s></s> B </s>`
1544
+
1545
+ Args:
1546
+ token_ids_0 (`List[int]`):
1547
+ List of IDs to which the special tokens will be added.
1548
+ token_ids_1 (`List[int]`, *optional*):
1549
+ Optional second list of IDs for sequence pairs.
1550
+
1551
+ Returns:
1552
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
1553
+ """
1554
+
1555
+ if token_ids_1 is None:
1556
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
1557
+ cls = [self.cls_token_id]
1558
+ sep = [self.sep_token_id]
1559
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
1560
+
1561
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.get_special_tokens_mask
1562
+ def get_special_tokens_mask(
1563
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
1564
+ ) -> List[int]:
1565
+ """
1566
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
1567
+ special tokens using the tokenizer `prepare_for_model` method.
1568
+
1569
+ Args:
1570
+ token_ids_0 (`List[int]`):
1571
+ List of IDs.
1572
+ token_ids_1 (`List[int]`, *optional*):
1573
+ Optional second list of IDs for sequence pairs.
1574
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
1575
+ Whether or not the token list is already formatted with special tokens for the model.
1576
+
1577
+ Returns:
1578
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
1579
+ """
1580
+
1581
+ if already_has_special_tokens:
1582
+ return super().get_special_tokens_mask(
1583
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
1584
+ )
1585
+
1586
+ if token_ids_1 is None:
1587
+ return [1] + ([0] * len(token_ids_0)) + [1]
1588
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
1589
+
1590
+ # Copied from transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer.create_token_type_ids_from_sequences
1591
+ def create_token_type_ids_from_sequences(
1592
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
1593
+ ) -> List[int]:
1594
+ """
1595
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
1596
+ not make use of token type ids, therefore a list of zeros is returned.
1597
+
1598
+ Args:
1599
+ token_ids_0 (`List[int]`):
1600
+ List of IDs.
1601
+ token_ids_1 (`List[int]`, *optional*):
1602
+ Optional second list of IDs for sequence pairs.
1603
+
1604
+ Returns:
1605
+ `List[int]`: List of zeros.
1606
+
1607
+ """
1608
+
1609
+ sep = [self.sep_token_id]
1610
+ cls = [self.cls_token_id]
1611
+
1612
+ if token_ids_1 is None:
1613
+ return len(cls + token_ids_0 + sep) * [0]
1614
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
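For readers following the entity handling in `prepare_for_model` above, the sketch below replays the `entity_position_ids` construction in isolation. It is illustrative only; `MAX_MENTION_LENGTH`, the example spans, and the offset value are assumptions, not values taken from the file.

```python
# Illustrative sketch only -- mirrors the entity_position_ids logic in prepare_for_model above.
# MAX_MENTION_LENGTH, the example spans and the offset value are assumptions for demonstration.
from typing import List, Tuple

MAX_MENTION_LENGTH = 30  # hypothetical; the tokenizer reads this from self.max_mention_length


def build_entity_position_ids(
    entity_token_spans: List[Tuple[int, int]], entity_token_offset: int
) -> List[List[int]]:
    """For each (start, end) word span, emit the shifted token positions padded with -1."""
    position_ids_per_entity = []
    for start, end in entity_token_spans:
        start += entity_token_offset
        end += entity_token_offset
        position_ids = list(range(start, end))[:MAX_MENTION_LENGTH]
        position_ids += [-1] * (MAX_MENTION_LENGTH - end + start)
        position_ids_per_entity.append(position_ids)
    return position_ids_per_entity


if __name__ == "__main__":
    # Two entities covering word positions [0, 2) and [3, 5); offset 1 accounts for the leading <s>.
    print(build_entity_position_ids([(0, 2), (3, 5)], entity_token_offset=1))
    # [[1, 2, -1, ...], [4, 5, -1, ...]]
```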
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__init__.py ADDED
@@ -0,0 +1,140 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tf_available,
22
+ is_tokenizers_available,
23
+ is_torch_available,
24
+ )
25
+
26
+
27
+ _import_structure = {"configuration_pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig"]}
28
+
29
+ try:
30
+ if not is_sentencepiece_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_pegasus"] = ["PegasusTokenizer"]
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_pegasus_fast"] = ["PegasusTokenizerFast"]
44
+
45
+ try:
46
+ if not is_torch_available():
47
+ raise OptionalDependencyNotAvailable()
48
+ except OptionalDependencyNotAvailable:
49
+ pass
50
+ else:
51
+ _import_structure["modeling_pegasus"] = [
52
+ "PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
53
+ "PegasusForCausalLM",
54
+ "PegasusForConditionalGeneration",
55
+ "PegasusModel",
56
+ "PegasusPreTrainedModel",
57
+ ]
58
+
59
+ try:
60
+ if not is_tf_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ _import_structure["modeling_tf_pegasus"] = [
66
+ "TFPegasusForConditionalGeneration",
67
+ "TFPegasusModel",
68
+ "TFPegasusPreTrainedModel",
69
+ ]
70
+
71
+ try:
72
+ if not is_flax_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ _import_structure["modeling_flax_pegasus"] = [
78
+ "FlaxPegasusForConditionalGeneration",
79
+ "FlaxPegasusModel",
80
+ "FlaxPegasusPreTrainedModel",
81
+ ]
82
+
83
+
84
+ if TYPE_CHECKING:
85
+ from .configuration_pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig
86
+
87
+ try:
88
+ if not is_sentencepiece_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ from .tokenization_pegasus import PegasusTokenizer
94
+
95
+ try:
96
+ if not is_tokenizers_available():
97
+ raise OptionalDependencyNotAvailable()
98
+ except OptionalDependencyNotAvailable:
99
+ pass
100
+ else:
101
+ from .tokenization_pegasus_fast import PegasusTokenizerFast
102
+
103
+ try:
104
+ if not is_torch_available():
105
+ raise OptionalDependencyNotAvailable()
106
+ except OptionalDependencyNotAvailable:
107
+ pass
108
+ else:
109
+ from .modeling_pegasus import (
110
+ PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
111
+ PegasusForCausalLM,
112
+ PegasusForConditionalGeneration,
113
+ PegasusModel,
114
+ PegasusPreTrainedModel,
115
+ )
116
+
117
+ try:
118
+ if not is_tf_available():
119
+ raise OptionalDependencyNotAvailable()
120
+ except OptionalDependencyNotAvailable:
121
+ pass
122
+ else:
123
+ from .modeling_tf_pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel
124
+
125
+ try:
126
+ if not is_flax_available():
127
+ raise OptionalDependencyNotAvailable()
128
+ except OptionalDependencyNotAvailable:
129
+ pass
130
+ else:
131
+ from .modeling_flax_pegasus import (
132
+ FlaxPegasusForConditionalGeneration,
133
+ FlaxPegasusModel,
134
+ FlaxPegasusPreTrainedModel,
135
+ )
136
+
137
+ else:
138
+ import sys
139
+
140
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
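The `__init__.py` above follows the library's lazy-import pattern: declare `_import_structure` up front, guard optional backends with try/except around availability checks, and swap the module object for a `_LazyModule` at import time. The toy sketch below shows the core idea with invented names; it is not the `_LazyModule` implementation from `transformers.utils`.

```python
# Toy illustration of the lazy-import pattern used above; the names here are invented for the sketch.
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported attributes on first access instead of importing every submodule eagerly."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol back to the submodule that defines it.
        self._class_to_module = {
            sym: mod for mod, symbols in import_structure.items() for sym in symbols
        }

    def __getattr__(self, item: str):
        if item in self._class_to_module:
            module = importlib.import_module("." + self._class_to_module[item], self.__name__)
            return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")


# A package __init__ would end with something like:
# sys.modules[__name__] = LazyModule(__name__, {"configuration_pegasus": ["PegasusConfig"]})
```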
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.99 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/configuration_pegasus.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/convert_pegasus_tf_to_pytorch.cpython-310.pyc ADDED
Binary file (4.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_flax_pegasus.cpython-310.pyc ADDED
Binary file (43.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_pegasus.cpython-310.pyc ADDED
Binary file (55.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/modeling_tf_pegasus.cpython-310.pyc ADDED
Binary file (51.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/tokenization_pegasus.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/__pycache__/tokenization_pegasus_fast.cpython-310.pyc ADDED
Binary file (8.09 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/configuration_pegasus.py ADDED
@@ -0,0 +1,164 @@
1
+ # coding=utf-8
2
+ # Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PEGASUS model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class PegasusConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate a
30
+ PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
31
+ with the defaults will yield a similar configuration to that of the PEGASUS
32
+ [google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50265):
40
+ Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
41
+ `input_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
42
+ d_model (`int`, *optional*, defaults to 1024):
43
+ Dimensionality of the layers and the pooler layer.
44
+ encoder_layers (`int`, *optional*, defaults to 12):
45
+ Number of encoder layers.
46
+ decoder_layers (`int`, *optional*, defaults to 12):
47
+ Number of decoder layers.
48
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
49
+ Number of attention heads for each attention layer in the Transformer encoder.
50
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
51
+ Number of attention heads for each attention layer in the Transformer decoder.
52
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
53
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
54
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
55
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
56
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
57
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
58
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
59
+ dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ attention_dropout (`float`, *optional*, defaults to 0.0):
62
+ The dropout ratio for the attention probabilities.
63
+ activation_dropout (`float`, *optional*, defaults to 0.0):
64
+ The dropout ratio for activations inside the fully connected layer.
65
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
66
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
67
+ just in case (e.g., 512 or 1024 or 2048).
68
+ init_std (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
71
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
72
+ for more details.
73
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
74
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
75
+ for more details.
76
+ scale_embedding (`bool`, *optional*, defaults to `False`):
77
+ Scale embeddings by dividing by sqrt(d_model).
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models).
80
+ forced_eos_token_id (`int`, *optional*, defaults to 1):
81
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
82
+ `eos_token_id`.
83
+
84
+ Example:
85
+
86
+ ```python
87
+ >>> from transformers import PegasusConfig, PegasusModel
88
+
89
+ >>> # Initializing a PEGASUS google/pegasus-large style configuration
90
+ >>> configuration = PegasusConfig()
91
+
92
+ >>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
93
+ >>> model = PegasusModel(configuration)
94
+
95
+ >>> # Accessing the model configuration
96
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "pegasus"
100
+ keys_to_ignore_at_inference = ["past_key_values"]
101
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
102
+
103
+ def __init__(
104
+ self,
105
+ vocab_size=50265,
106
+ max_position_embeddings=1024,
107
+ encoder_layers=12,
108
+ encoder_ffn_dim=4096,
109
+ encoder_attention_heads=16,
110
+ decoder_layers=12,
111
+ decoder_ffn_dim=4096,
112
+ decoder_attention_heads=16,
113
+ encoder_layerdrop=0.0,
114
+ decoder_layerdrop=0.0,
115
+ use_cache=True,
116
+ is_encoder_decoder=True,
117
+ activation_function="gelu",
118
+ d_model=1024,
119
+ dropout=0.1,
120
+ attention_dropout=0.0,
121
+ activation_dropout=0.0,
122
+ init_std=0.02,
123
+ decoder_start_token_id=0,
124
+ scale_embedding=False,
125
+ pad_token_id=0,
126
+ eos_token_id=1,
127
+ forced_eos_token_id=1,
128
+ **kwargs,
129
+ ):
130
+ self.vocab_size = vocab_size
131
+ self.max_position_embeddings = max_position_embeddings
132
+ self.d_model = d_model
133
+ self.encoder_ffn_dim = encoder_ffn_dim
134
+ self.encoder_layers = encoder_layers
135
+ self.encoder_attention_heads = encoder_attention_heads
136
+ self.decoder_ffn_dim = decoder_ffn_dim
137
+ self.decoder_layers = decoder_layers
138
+ self.decoder_attention_heads = decoder_attention_heads
139
+ self.dropout = dropout
140
+ self.attention_dropout = attention_dropout
141
+ self.activation_dropout = activation_dropout
142
+ self.activation_function = activation_function
143
+ self.init_std = init_std
144
+ self.encoder_layerdrop = encoder_layerdrop
145
+ self.decoder_layerdrop = decoder_layerdrop
146
+ self.use_cache = use_cache
147
+ self.num_hidden_layers = encoder_layers
148
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
149
+ super().__init__(
150
+ pad_token_id=pad_token_id,
151
+ eos_token_id=eos_token_id,
152
+ is_encoder_decoder=is_encoder_decoder,
153
+ decoder_start_token_id=decoder_start_token_id,
154
+ forced_eos_token_id=forced_eos_token_id,
155
+ **kwargs,
156
+ )
157
+
158
+ @property
159
+ def num_attention_heads(self) -> int:
160
+ return self.encoder_attention_heads
161
+
162
+ @property
163
+ def hidden_size(self) -> int:
164
+ return self.d_model
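Because of `attribute_map` and the two properties at the end of the file, framework-agnostic code can read `config.hidden_size` and `config.num_attention_heads` even though the Pegasus-specific fields are `d_model` and `encoder_attention_heads`. A short usage sketch, assuming `transformers` is installed:

```python
# Usage sketch: the aliased attributes resolve to the Pegasus-specific fields.
from transformers import PegasusConfig

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
assert config.hidden_size == 512          # alias for d_model
assert config.num_attention_heads == 8    # alias for encoder_attention_heads
```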
llmeval-env/lib/python3.10/site-packages/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Google and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+ from pathlib import Path
19
+ from typing import Dict
20
+
21
+ import tensorflow as tf
22
+ import torch
23
+ from tqdm import tqdm
24
+
25
+ from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
26
+ from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
27
+
28
+
29
+ PATTERNS = [
30
+ # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
31
+ ["memory_attention", "encoder_attn"],
32
+ ["attention", "attn"],
33
+ ["/", "."],
34
+ [".LayerNorm.gamma", "_layer_norm.weight"],
35
+ [".LayerNorm.beta", "_layer_norm.bias"],
36
+ ["r.layer_", "r.layers."],
37
+ ["output_proj", "out_proj"],
38
+ ["ffn.dense_1.", "fc2."],
39
+ ["ffn.dense.", "fc1."],
40
+ ["ffn_layer_norm", "final_layer_norm"],
41
+ ["kernel", "weight"],
42
+ ["encoder_layer_norm.", "encoder.layer_norm."],
43
+ ["decoder_layer_norm.", "decoder.layer_norm."],
44
+ ["embeddings.weights", "shared.weight"],
45
+ ]
46
+
47
+
48
+ def rename_state_dict_key(k):
49
+ for pegasus_name, hf_name in PATTERNS:
50
+ k = k.replace(pegasus_name, hf_name)
51
+ return k
52
+
53
+
54
+ # See appendix C of paper for all hyperparams
55
+
56
+
57
+ def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
58
+ cfg_kwargs = DEFAULTS.copy()
59
+ cfg_kwargs.update(cfg_updates)
60
+ cfg = PegasusConfig(**cfg_kwargs)
61
+ torch_model = PegasusForConditionalGeneration(cfg)
62
+ sd = torch_model.model.state_dict()
63
+ mapping = {}
64
+ for k, v in tf_weights.items():
65
+ new_k = rename_state_dict_key(k)
66
+ if new_k not in sd:
67
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
68
+
69
+ if "dense" in k or "proj" in new_k:
70
+ v = v.T
71
+ mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
72
+ assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
73
+ # make sure embedding.padding_idx is respected
74
+ mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
75
+ mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
76
+ mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
77
+ empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
78
+ mapping.update(**empty_biases)
79
+ missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
80
+ unexpected_missing = [
81
+ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
82
+ ]
83
+ assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
84
+ assert extra == [], f"no matches found for the following tf keys {extra}"
85
+ return torch_model
86
+
87
+
88
+ def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
89
+ init_vars = tf.train.list_variables(path)
90
+ tf_weights = {}
91
+ ignore_name = ["Adafactor", "global_step"]
92
+ for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
93
+ skip_key = any(pat in name for pat in ignore_name)
94
+ if skip_key:
95
+ continue
96
+ array = tf.train.load_variable(path, name)
97
+ tf_weights[name] = array
98
+ return tf_weights
99
+
100
+
101
+ def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
102
+ # save tokenizer first
103
+ dataset = Path(ckpt_path).parent.name
104
+ desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
105
+ tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
106
+ assert tok.model_max_length == desired_max_model_length
107
+ tok.save_pretrained(save_dir)
108
+
109
+ # convert model
110
+ tf_weights = get_tf_weights_as_numpy(ckpt_path)
111
+ cfg_updates = task_specific_params[f"summarization_{dataset}"]
112
+ if dataset == "large":
113
+ cfg_updates["task_specific_params"] = task_specific_params
114
+ torch_model = convert_pegasus(tf_weights, cfg_updates)
115
+ torch_model.save_pretrained(save_dir)
116
+ sd = torch_model.state_dict()
117
+ sd.pop("model.decoder.embed_positions.weight")
118
+ sd.pop("model.encoder.embed_positions.weight")
119
+ torch.save(sd, Path(save_dir) / "pytorch_model.bin")
120
+
121
+
122
+ if __name__ == "__main__":
123
+ parser = argparse.ArgumentParser()
124
+ # Required parameters
125
+ parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
126
+ parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
127
+ args = parser.parse_args()
128
+ if args.save_dir is None:
129
+ dataset = Path(args.tf_ckpt_path).parent.name
130
+ args.save_dir = os.path.join("pegasus", dataset)
131
+ convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
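The conversion script maps TF variable names to PyTorch state-dict keys with the simple left-to-right substitutions in `PATTERNS`. The snippet below replays that renaming on a made-up key so the effect of the table is easy to see; the input key is an assumption for illustration, not a real checkpoint variable.

```python
# Standalone replay of the PATTERNS-based renaming above; the input key is invented for illustration.
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    # Apply every substitution in order, exactly as the script does.
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


print(rename_state_dict_key("decoder/layer_0/self_attention/output_proj/kernel"))
# -> "decoder.layers.0.self_attn.out_proj.weight"
```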