applied-ai-018 committed on
Commit c6434cd · verified · 1 Parent(s): 1e1c2c5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py +271 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py +249 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__init__.py +70 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py +242 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py +291 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__init__.py +68 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/convert_mra_pytorch_to_pytorch.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/configuration_mra.py +137 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/convert_mra_pytorch_to_pytorch.py +110 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/mra/modeling_mra.py +1480 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__init__.py +69 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py +103 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py +1693 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py +96 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/configuration_perceiver.py +244 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py +468 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/feature_extraction_perceiver.py +33 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/image_processing_perceiver.py +367 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/modeling_perceiver.py +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/tokenization_perceiver.py +198 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__init__.py +93 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/__init__.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/modeling_squeezebert.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/configuration_squeezebert.py +166 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/modeling_squeezebert.py +1087 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert.py +503 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert_fast.py +173 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.12 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/configuration_deta.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_resnet_to_pytorch.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/convert_deta_swin_to_pytorch.cpython-310.pyc ADDED
Binary file (12.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/image_processing_deta.cpython-310.pyc ADDED
Binary file (39.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc ADDED
Binary file (100 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/deta/configuration_deta.py ADDED
@@ -0,0 +1,271 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DETA model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+ from ..auto import CONFIG_MAPPING
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class DetaConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
32
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
33
+ defaults will yield a similar configuration to that of the DETA
34
+ [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
41
+ The configuration of the backbone model.
42
+ backbone (`str`, *optional*):
43
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
44
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
45
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
46
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
47
+ Whether to use pretrained weights for the backbone.
48
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
49
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
50
+ library.
51
+ backbone_kwargs (`dict`, *optional*):
52
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
53
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
54
+ num_queries (`int`, *optional*, defaults to 900):
55
+ Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
56
+ detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
57
+ d_model (`int`, *optional*, defaults to 256):
58
+ Dimension of the layers.
59
+ encoder_layers (`int`, *optional*, defaults to 6):
60
+ Number of encoder layers.
61
+ decoder_layers (`int`, *optional*, defaults to 6):
62
+ Number of decoder layers.
63
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
64
+ Number of attention heads for each attention layer in the Transformer encoder.
65
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
66
+ Number of attention heads for each attention layer in the Transformer decoder.
67
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
68
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
69
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
70
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
71
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
72
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
73
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
74
+ dropout (`float`, *optional*, defaults to 0.1):
75
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
76
+ attention_dropout (`float`, *optional*, defaults to 0.0):
77
+ The dropout ratio for the attention probabilities.
78
+ activation_dropout (`float`, *optional*, defaults to 0.0):
79
+ The dropout ratio for activations inside the fully connected layer.
80
+ init_std (`float`, *optional*, defaults to 0.02):
81
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
82
+ init_xavier_std (`float`, *optional*, defaults to 1):
83
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
84
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
85
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
86
+ for more details.
87
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
88
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
89
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
90
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
91
+ class_cost (`float`, *optional*, defaults to 1):
92
+ Relative weight of the classification error in the Hungarian matching cost.
93
+ bbox_cost (`float`, *optional*, defaults to 5):
94
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
95
+ giou_cost (`float`, *optional*, defaults to 2):
96
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
97
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
98
+ Relative weight of the Focal loss in the panoptic segmentation loss.
99
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
100
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
101
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
102
+ Relative weight of the L1 bounding box loss in the object detection loss.
103
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
104
+ Relative weight of the generalized IoU loss in the object detection loss.
105
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
106
+ Relative classification weight of the 'no-object' class in the object detection loss.
107
+ num_feature_levels (`int`, *optional*, defaults to 5):
108
+ The number of input feature levels.
109
+ encoder_n_points (`int`, *optional*, defaults to 4):
110
+ The number of sampled keys in each feature level for each attention head in the encoder.
111
+ decoder_n_points (`int`, *optional*, defaults to 4):
112
+ The number of sampled keys in each feature level for each attention head in the decoder.
113
+ two_stage (`bool`, *optional*, defaults to `True`):
114
+ Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
115
+ DETA and are further fed into the decoder for iterative bounding box refinement.
116
+ two_stage_num_proposals (`int`, *optional*, defaults to 300):
117
+ The number of region proposals to be generated, in case `two_stage` is set to `True`.
118
+ with_box_refine (`bool`, *optional*, defaults to `True`):
119
+ Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
120
+ based on the predictions from the previous layer.
121
+ focal_alpha (`float`, *optional*, defaults to 0.25):
122
+ Alpha parameter in the focal loss.
123
+ assign_first_stage (`bool`, *optional*, defaults to `True`):
124
+ Whether to assign each prediction to the ground-truth object with which it overlaps most, provided that overlap exceeds a threshold of 0.7.
125
+ assign_second_stage (`bool`, *optional*, defaults to `True`):
126
+ Whether the assignment procedure in the second stage should closely follow the first-stage assignment procedure.
127
+ disable_custom_kernels (`bool`, *optional*, defaults to `True`):
128
+ Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
129
+ kernels are not supported by PyTorch ONNX export.
130
+
131
+ Examples:
132
+
133
+ ```python
134
+ >>> from transformers import DetaConfig, DetaModel
135
+
136
+ >>> # Initializing a DETA SenseTime/deformable-detr style configuration
137
+ >>> configuration = DetaConfig()
138
+
139
+ >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
140
+ >>> model = DetaModel(configuration)
141
+
142
+ >>> # Accessing the model configuration
143
+ >>> configuration = model.config
144
+ ```"""
145
+
146
+ model_type = "deta"
147
+ attribute_map = {
148
+ "hidden_size": "d_model",
149
+ "num_attention_heads": "encoder_attention_heads",
150
+ }
151
+
152
+ def __init__(
153
+ self,
154
+ backbone_config=None,
155
+ backbone=None,
156
+ use_pretrained_backbone=False,
157
+ use_timm_backbone=False,
158
+ backbone_kwargs=None,
159
+ num_queries=900,
160
+ max_position_embeddings=2048,
161
+ encoder_layers=6,
162
+ encoder_ffn_dim=2048,
163
+ encoder_attention_heads=8,
164
+ decoder_layers=6,
165
+ decoder_ffn_dim=1024,
166
+ decoder_attention_heads=8,
167
+ encoder_layerdrop=0.0,
168
+ is_encoder_decoder=True,
169
+ activation_function="relu",
170
+ d_model=256,
171
+ dropout=0.1,
172
+ attention_dropout=0.0,
173
+ activation_dropout=0.0,
174
+ init_std=0.02,
175
+ init_xavier_std=1.0,
176
+ return_intermediate=True,
177
+ auxiliary_loss=False,
178
+ position_embedding_type="sine",
179
+ num_feature_levels=5,
180
+ encoder_n_points=4,
181
+ decoder_n_points=4,
182
+ two_stage=True,
183
+ two_stage_num_proposals=300,
184
+ with_box_refine=True,
185
+ assign_first_stage=True,
186
+ assign_second_stage=True,
187
+ class_cost=1,
188
+ bbox_cost=5,
189
+ giou_cost=2,
190
+ mask_loss_coefficient=1,
191
+ dice_loss_coefficient=1,
192
+ bbox_loss_coefficient=5,
193
+ giou_loss_coefficient=2,
194
+ eos_coefficient=0.1,
195
+ focal_alpha=0.25,
196
+ disable_custom_kernels=True,
197
+ **kwargs,
198
+ ):
199
+ if use_pretrained_backbone:
200
+ raise ValueError("Pretrained backbones are not supported yet.")
201
+
202
+ if backbone_config is not None and backbone is not None:
203
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
204
+
205
+ if backbone_config is None and backbone is None:
206
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
207
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
208
+ else:
209
+ if isinstance(backbone_config, dict):
210
+ backbone_model_type = backbone_config.pop("model_type")
211
+ config_class = CONFIG_MAPPING[backbone_model_type]
212
+ backbone_config = config_class.from_dict(backbone_config)
213
+
214
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
215
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
216
+
217
+ self.backbone_config = backbone_config
218
+ self.backbone = backbone
219
+ self.use_pretrained_backbone = use_pretrained_backbone
220
+ self.use_timm_backbone = use_timm_backbone
221
+ self.backbone_kwargs = backbone_kwargs
222
+ self.num_queries = num_queries
223
+ self.max_position_embeddings = max_position_embeddings
224
+ self.d_model = d_model
225
+ self.encoder_ffn_dim = encoder_ffn_dim
226
+ self.encoder_layers = encoder_layers
227
+ self.encoder_attention_heads = encoder_attention_heads
228
+ self.decoder_ffn_dim = decoder_ffn_dim
229
+ self.decoder_layers = decoder_layers
230
+ self.decoder_attention_heads = decoder_attention_heads
231
+ self.dropout = dropout
232
+ self.attention_dropout = attention_dropout
233
+ self.activation_dropout = activation_dropout
234
+ self.activation_function = activation_function
235
+ self.init_std = init_std
236
+ self.init_xavier_std = init_xavier_std
237
+ self.encoder_layerdrop = encoder_layerdrop
238
+ self.auxiliary_loss = auxiliary_loss
239
+ self.position_embedding_type = position_embedding_type
240
+ # deformable attributes
241
+ self.num_feature_levels = num_feature_levels
242
+ self.encoder_n_points = encoder_n_points
243
+ self.decoder_n_points = decoder_n_points
244
+ self.two_stage = two_stage
245
+ self.two_stage_num_proposals = two_stage_num_proposals
246
+ self.with_box_refine = with_box_refine
247
+ self.assign_first_stage = assign_first_stage
248
+ self.assign_second_stage = assign_second_stage
249
+ if two_stage is True and with_box_refine is False:
250
+ raise ValueError("If two_stage is True, with_box_refine must be True.")
251
+ # Hungarian matcher
252
+ self.class_cost = class_cost
253
+ self.bbox_cost = bbox_cost
254
+ self.giou_cost = giou_cost
255
+ # Loss coefficients
256
+ self.mask_loss_coefficient = mask_loss_coefficient
257
+ self.dice_loss_coefficient = dice_loss_coefficient
258
+ self.bbox_loss_coefficient = bbox_loss_coefficient
259
+ self.giou_loss_coefficient = giou_loss_coefficient
260
+ self.eos_coefficient = eos_coefficient
261
+ self.focal_alpha = focal_alpha
262
+ self.disable_custom_kernels = disable_custom_kernels
263
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
264
+
265
+ @property
266
+ def num_attention_heads(self) -> int:
267
+ return self.encoder_attention_heads
268
+
269
+ @property
270
+ def hidden_size(self) -> int:
271
+ return self.d_model
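To complement the backbone arguments documented above, here is a minimal sketch (not part of this commit, and assuming a `transformers` build in which the DETA classes are importable) of the two supported ways to pick a backbone:

```python
from transformers import DetaConfig, DetaModel, ResNetConfig

# With no arguments, __init__ above falls back to a ResNet backbone with
# out_features ["stage2", "stage3", "stage4"].
config = DetaConfig()

# Alternatively, pass an explicit backbone config; note that `backbone` and
# `backbone_config` are mutually exclusive.
resnet = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
config = DetaConfig(backbone_config=resnet, num_queries=900, two_stage=True)

model = DetaModel(config)   # randomly initialized weights
print(config.hidden_size)   # 256, aliased to d_model via attribute_map
```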
llmeval-env/lib/python3.10/site-packages/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,249 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Hubert checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+
22
+ import fairseq
23
+ import torch
24
+ from fairseq.data import Dictionary
25
+
26
+ from transformers import (
27
+ HubertConfig,
28
+ HubertForCTC,
29
+ HubertModel,
30
+ Wav2Vec2CTCTokenizer,
31
+ Wav2Vec2FeatureExtractor,
32
+ Wav2Vec2Processor,
33
+ logging,
34
+ )
35
+
36
+
37
+ logging.set_verbosity_info()
38
+ logger = logging.get_logger(__name__)
39
+
40
+ MAPPING = {
41
+ "post_extract_proj": "feature_projection.projection",
42
+ "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
43
+ "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
44
+ "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
45
+ "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
46
+ "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
47
+ "self_attn_layer_norm": "encoder.layers.*.layer_norm",
48
+ "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
49
+ "fc2": "encoder.layers.*.feed_forward.output_dense",
50
+ "final_layer_norm": "encoder.layers.*.final_layer_norm",
51
+ "encoder.layer_norm": "encoder.layer_norm",
52
+ "w2v_model.layer_norm": "feature_projection.layer_norm",
53
+ "w2v_encoder.proj": "lm_head",
54
+ "mask_emb": "masked_spec_embed",
55
+ }
56
+
57
+
58
+ def set_recursively(hf_pointer, key, value, full_name, weight_type):
59
+ for attribute in key.split("."):
60
+ hf_pointer = getattr(hf_pointer, attribute)
61
+
62
+ if weight_type is not None:
63
+ hf_shape = getattr(hf_pointer, weight_type).shape
64
+ else:
65
+ hf_shape = hf_pointer.shape
66
+
67
+ assert hf_shape == value.shape, (
68
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
69
+ f" {value.shape} for {full_name}"
70
+ )
71
+
72
+ if weight_type == "weight":
73
+ hf_pointer.weight.data = value
74
+ elif weight_type == "weight_g":
75
+ hf_pointer.weight_g.data = value
76
+ elif weight_type == "weight_v":
77
+ hf_pointer.weight_v.data = value
78
+ elif weight_type == "bias":
79
+ hf_pointer.bias.data = value
80
+ else:
81
+ hf_pointer.data = value
82
+
83
+ logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
84
+
85
+
86
+ def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
87
+ unused_weights = []
88
+ fairseq_dict = fairseq_model.state_dict()
89
+
90
+ feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
91
+
92
+ for name, value in fairseq_dict.items():
93
+ is_used = False
94
+ if "conv_layers" in name:
95
+ load_conv_layer(
96
+ name,
97
+ value,
98
+ feature_extractor,
99
+ unused_weights,
100
+ hf_model.config.feat_extract_norm == "group",
101
+ )
102
+ is_used = True
103
+ else:
104
+ for key, mapped_key in MAPPING.items():
105
+ mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
106
+
107
+ if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
108
+ is_used = True
109
+ if "*" in mapped_key:
110
+ layer_index = name.split(key)[0].split(".")[-2]
111
+ mapped_key = mapped_key.replace("*", layer_index)
112
+ if "weight_g" in name:
113
+ weight_type = "weight_g"
114
+ elif "weight_v" in name:
115
+ weight_type = "weight_v"
116
+ elif "weight" in name:
117
+ weight_type = "weight"
118
+ elif "bias" in name:
119
+ weight_type = "bias"
120
+ else:
121
+ weight_type = None
122
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
123
+ continue
124
+ if not is_used:
125
+ unused_weights.append(name)
126
+
127
+ logger.warning(f"Unused weights: {unused_weights}")
128
+
129
+
130
+ def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
131
+ name = full_name.split("conv_layers.")[-1]
132
+ items = name.split(".")
133
+ layer_id = int(items[0])
134
+ type_id = int(items[1])
135
+
136
+ if type_id == 0:
137
+ if "bias" in name:
138
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
139
+ f"{full_name} has size {value.shape}, but"
140
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
141
+ )
142
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
143
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
144
+ elif "weight" in name:
145
+ assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
146
+ f"{full_name} has size {value.shape}, but"
147
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
148
+ )
149
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
150
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
151
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
152
+ if "bias" in name:
153
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
154
+ f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
155
+ " found."
156
+ )
157
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
158
+ logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
159
+ elif "weight" in name:
160
+ assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
161
+ f"{full_name} has size {value.shape}, but"
162
+ f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
163
+ )
164
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
165
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
166
+ else:
167
+ unused_weights.append(full_name)
168
+
169
+
170
+ @torch.no_grad()
171
+ def convert_hubert_checkpoint(
172
+ checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
173
+ ):
174
+ """
175
+ Copy/paste/tweak model's weights to transformers design.
176
+ """
177
+ if config_path is not None:
178
+ config = HubertConfig.from_pretrained(config_path)
179
+ else:
180
+ config = HubertConfig()
181
+
182
+ if is_finetuned:
183
+ if dict_path:
184
+ target_dict = Dictionary.load(dict_path)
185
+
186
+ # important change bos & pad token id since CTC symbol is <pad> and
187
+ # not <s> as in fairseq
188
+ config.bos_token_id = target_dict.pad_index
189
+ config.pad_token_id = target_dict.bos_index
190
+ config.eos_token_id = target_dict.eos_index
191
+ config.vocab_size = len(target_dict.symbols)
192
+ vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
193
+ if not os.path.isdir(pytorch_dump_folder_path):
194
+ logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
195
+ return
196
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
197
+ with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
198
+ json.dump(target_dict.indices, vocab_handle)
199
+ tokenizer = Wav2Vec2CTCTokenizer(
200
+ vocab_path,
201
+ unk_token=target_dict.unk_word,
202
+ pad_token=target_dict.pad_word,
203
+ bos_token=target_dict.bos_word,
204
+ eos_token=target_dict.eos_word,
205
+ word_delimiter_token="|",
206
+ do_lower_case=False,
207
+ )
208
+ return_attention_mask = True if config.feat_extract_norm == "layer" else False
209
+ feature_extractor = Wav2Vec2FeatureExtractor(
210
+ feature_size=1,
211
+ sampling_rate=16000,
212
+ padding_value=0,
213
+ do_normalize=True,
214
+ return_attention_mask=return_attention_mask,
215
+ )
216
+ processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
217
+ processor.save_pretrained(pytorch_dump_folder_path)
218
+
219
+ hf_wav2vec = HubertForCTC(config)
220
+ else:
221
+ hf_wav2vec = HubertModel(config)
222
+
223
+ if is_finetuned:
224
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
225
+ [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
226
+ )
227
+ else:
228
+ model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
229
+
230
+ model = model[0].eval()
231
+
232
+ recursively_load_weights(model, hf_wav2vec, is_finetuned)
233
+
234
+ hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
235
+
236
+
237
+ if __name__ == "__main__":
238
+ parser = argparse.ArgumentParser()
239
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
240
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
241
+ parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
242
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
243
+ parser.add_argument(
244
+ "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
245
+ )
246
+ args = parser.parse_args()
247
+ convert_hubert_checkpoint(
248
+ args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
249
+ )
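For context, a hedged sketch of how the conversion entry point above is typically driven; the paths are placeholders and `fairseq` must be installed, so this is illustrative rather than something executed by this commit:

```python
# Equivalent CLI form (placeholder paths):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base --not_finetuned
from transformers.models.hubert.convert_hubert_original_pytorch_checkpoint_to_pytorch import (
    convert_hubert_checkpoint,
)

convert_hubert_checkpoint(
    checkpoint_path="/path/to/hubert_base_ls960.pt",  # fairseq checkpoint (placeholder)
    pytorch_dump_folder_path="./hubert-base",         # output directory (placeholder)
    config_path=None,                                 # fall back to the default HubertConfig
    dict_path=None,
    is_finetuned=False,                               # bare HubertModel, no CTC head
)
```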
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_mega"] = [
35
+ "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "MegaForCausalLM",
37
+ "MegaForMaskedLM",
38
+ "MegaForMultipleChoice",
39
+ "MegaForQuestionAnswering",
40
+ "MegaForSequenceClassification",
41
+ "MegaForTokenClassification",
42
+ "MegaModel",
43
+ "MegaPreTrainedModel",
44
+ ]
45
+
46
+ if TYPE_CHECKING:
47
+ from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .modeling_mega import (
56
+ MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
57
+ MegaForCausalLM,
58
+ MegaForMaskedLM,
59
+ MegaForMultipleChoice,
60
+ MegaForQuestionAnswering,
61
+ MegaForSequenceClassification,
62
+ MegaForTokenClassification,
63
+ MegaModel,
64
+ MegaPreTrainedModel,
65
+ )
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
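Since the lazy-module pattern above is easy to miss, a short usage sketch (assuming PyTorch is installed, because `modeling_mega` is gated on `is_torch_available()`):

```python
# Importing the config is cheap; modeling_mega is only imported when one of
# its classes is first accessed through the lazy module.
from transformers.models.mega import MegaConfig
from transformers.models.mega import MegaForMaskedLM  # triggers the modeling_mega import

config = MegaConfig(num_hidden_layers=2)
model = MegaForMaskedLM(config)
```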
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc ADDED
Binary file (11.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc ADDED
Binary file (69.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py ADDED
@@ -0,0 +1,242 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Mega Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MEGA configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class MegaConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
33
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
34
+ defaults will yield a similar configuration to that of the Mega
35
+ [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 30522):
43
+ Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the
44
+ `inputs_ids` passed when calling [`MegaModel`].
45
+ hidden_size (`int`, *optional*, defaults to 128):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 4):
48
+ Number of hidden layers in the Mega encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 256):
50
+ Dimensionality of the hidden size (self-attention value projection) within the Mega encoder
51
+ ema_projection_size (`int`, *optional*, defaults to 16):
52
+ Dimensionality of the MegaMultiDimensionDampedEma
53
+ bidirectional (`bool`, *optional*, defaults to `True`):
54
+ Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`)
55
+ or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be
56
+ False if you intend to use the model as a decoder.
57
+ shared_representation_size (`int`, *optional*, defaults to 64):
58
+ Dimensionality of the linear projection for shared representation of self-attention queries and keys
59
+ use_chunking (`bool`, *optional*, defaults to `False`):
60
+ Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
61
+ chunk_size (`int`, *optional*, defaults to -1):
62
+ If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If
63
+ chunking is used, input sequences must be padded to a multiple of `chunk_size`
64
+ truncation (`int`, *optional*):
65
+ If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
66
+ normalize_before_mega (`bool`, *optional*, defaults to `True`):
67
+ Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
68
+ normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
69
+ Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`,
70
+ `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
71
+ norm_affine (`bool`, *optional*, defaults to `True`):
72
+ If `True`, applies a parameterized affine transformation to inputs during normalization
73
+ activation (`str`, *optional*, defaults to `"silu"`):
74
+ Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`,
75
+ `"gelu"`, or `"gelu_accurate"`
76
+ attention_activation (`str`, *optional*, defaults to `"softmax"`):
77
+ Activation function to apply for single-headed self-attention (a la Transformer). Choose one of
78
+ `"softmax"`, `"laplace"`, or `"relu2"`
79
+ dropout_prob (`float`, *optional*, defaults to 0.1):
80
+ The dropout probability for EMA self-attention
81
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
82
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
83
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
84
+ The dropout ratio for the attention probabilities.
85
+ use_feature_dropout (`bool`, *optional*, defaults to `False`):
86
+ Whether to use feature-based (`True`) or standard dropout (`False`)
87
+ use_normalized_ffn (`bool`, *optional*, defaults to `True`):
88
+ Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output
89
+ as-is (`False`)
90
+ nffn_hidden_size (`int`, *optional*, defaults to 256):
91
+ If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this
92
+ is the hidden size of the NFFN
93
+ normalize_before_ffn (`bool`, *optional*, defaults to `True`):
94
+ Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
95
+ nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
96
+ The dropout ratio for the NFFN component.
97
+ max_positions (`int`, *optional*, defaults to 2048):
98
+ The maximum sequence length to use for positional representations. For `"simple"` relative positional bias,
99
+ this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer
100
+ sequences
101
+ add_token_type_embeddings (`bool`, *optional*, defaults to `True`):
102
+ Whether to account for token types in embeddings. Left as optional to maintain compatibility with original
103
+ implementation while adding support for token types.
104
+ type_vocab_size (`int`, *optional*, defaults to 2):
105
+ The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if
106
+ `add_token_type_embeddings = True`
107
+ initializer_range (`float`, *optional*, defaults to 0.02):
108
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
109
+ ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
110
+ The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in
111
+ MegaMultiDimensionDampedEma.
112
+ ema_beta_range (`float`, *optional*, defaults to 0.02):
113
+ The standard deviation for initializing the beta parameter (expansion matrix) in
114
+ MegaMultiDimensionDampedEma.
115
+ ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
116
+ The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)
117
+ parameters in MultiDimensionEMA.
118
+ relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
119
+ Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected,
120
+ `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
121
+ is_decoder (`bool`, *optional*, defaults to `False`):
122
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
123
+ use_cache (`bool`, *optional*, defaults to `True`):
124
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
125
+ relevant if `config.is_decoder=True`.
126
+ classifier_dropout (`float`, *optional*):
127
+ The dropout ratio for the classification head.
128
+ add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
129
+ Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass
130
+ hidden states directly to LM head (`False`). Remains optional for compatibility with original
131
+ implementation
132
+
133
+ Examples:
134
+
135
+ ```python
136
+ >>> from transformers import MegaConfig, MegaModel
137
+
138
+ >>> # Initializing a Mega configuration
139
+ >>> configuration = MegaConfig()
140
+
141
+ >>> # Initializing a model (with random weights) from the configuration
142
+ >>> model = MegaModel(configuration)
143
+
144
+ >>> # Accessing the model configuration
145
+ >>> configuration = model.config
146
+ ```"""
147
+
148
+ model_type = "mega"
149
+
150
+ def __init__(
151
+ self,
152
+ vocab_size=30522,
153
+ hidden_size=128,
154
+ num_hidden_layers=4,
155
+ intermediate_size=256,
156
+ ema_projection_size=16,
157
+ bidirectional=True,
158
+ shared_representation_size=64,
159
+ use_chunking=False,
160
+ chunk_size=-1,
161
+ truncation=None,
162
+ normalize_before_mega=True,
163
+ normalization_type="scalenorm",
164
+ norm_affine=True,
165
+ activation="silu",
166
+ attention_activation="softmax",
167
+ dropout_prob=0.1,
168
+ hidden_dropout_prob=0.1,
169
+ attention_probs_dropout_prob=0.1,
170
+ use_feature_dropout=False,
171
+ use_normalized_ffn=True,
172
+ nffn_hidden_size=256,
173
+ normalize_before_ffn=True,
174
+ nffn_activation_dropout_prob=0.1,
175
+ max_positions=2048,
176
+ add_token_type_embeddings=False,
177
+ type_vocab_size=2,
178
+ initializer_range=0.02,
179
+ ema_delta_alpha_range=0.2,
180
+ ema_beta_range=0.02,
181
+ ema_gamma_omega_range=1.0,
182
+ pad_token_id=1,
183
+ bos_token_id=0,
184
+ eos_token_id=2,
185
+ relative_positional_bias="rotary",
186
+ classifier_dropout=None,
187
+ use_cache=True,
188
+ add_lm_hidden_dense_layer=True,
189
+ **kwargs,
190
+ ):
191
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
192
+
193
+ self.vocab_size = vocab_size
194
+ self.hidden_size = hidden_size
195
+ self.num_hidden_layers = num_hidden_layers
196
+ self.activation = activation
197
+ self.attention_activation = attention_activation
198
+ self.intermediate_size = intermediate_size
199
+ self.ema_projection_size = ema_projection_size
200
+ self.bidirectional = bidirectional
201
+ self.shared_representation_size = shared_representation_size
202
+ self.use_chunking = use_chunking
203
+ self.chunk_size = chunk_size
204
+ self.truncation = truncation
205
+ self.normalize_before_mega = normalize_before_mega
206
+ self.normalization_type = normalization_type
207
+ self.norm_affine = norm_affine
208
+ self.dropout_prob = dropout_prob
209
+ self.hidden_dropout_prob = hidden_dropout_prob
210
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
211
+ self.use_feature_dropout = use_feature_dropout
212
+ self.use_normalized_ffn = use_normalized_ffn
213
+ self.nffn_hidden_size = nffn_hidden_size
214
+ self.normalize_before_ffn = normalize_before_ffn
215
+ self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
216
+ self.max_positions = max_positions
217
+ self.add_token_type_embeddings = add_token_type_embeddings
218
+ self.type_vocab_size = type_vocab_size
219
+ self.initializer_range = initializer_range
220
+ self.ema_delta_alpha_range = ema_delta_alpha_range
221
+ self.ema_beta_range = ema_beta_range
222
+ self.ema_gamma_omega_range = ema_gamma_omega_range
223
+ self.relative_positional_bias = relative_positional_bias
224
+ self.use_cache = use_cache
225
+ self.classifier_dropout = classifier_dropout
226
+ self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
227
+ self.num_attention_heads = 1 # not used but required by Hugging Face
228
+
229
+
230
+ class MegaOnnxConfig(OnnxConfig):
231
+ @property
232
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
233
+ if self.task == "multiple-choice":
234
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
235
+ else:
236
+ dynamic_axis = {0: "batch", 1: "sequence"}
237
+ return OrderedDict(
238
+ [
239
+ ("input_ids", dynamic_axis),
240
+ ("attention_mask", dynamic_axis),
241
+ ]
242
+ )
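As the docstring above notes, bidirectional EMA is incompatible with causal decoding; a minimal sketch (assuming PyTorch plus the Mega classes from this commit) of a decoder-style configuration:

```python
from transformers import MegaConfig, MegaForCausalLM

config = MegaConfig(
    bidirectional=False,  # unidirectional EMA, required for left-to-right decoding
    is_decoder=True,      # standard PretrainedConfig flag for causal-LM use
    use_chunking=True,
    chunk_size=64,        # inputs must then be padded to a multiple of 64
)
model = MegaForCausalLM(config)  # randomly initialized weights
```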
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,291 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Convert Mega pretrained checkpoint. Built to convert the Masked LM checkpoint located at
18
+ https://huggingface.co/mnaylor/mega-wikitext-103
19
+
20
+ Requirements:
21
+ - clone the Mega repo and install fairseq from there
22
+ 1. git clone https://github.com/facebookresearch/mega.git
23
+ 2. cd mega && pip install -e .
24
+ - clone the pretrained weights for the original implementation from the hugging face repo
25
+ * use this location as the path for pretrained weights
26
+ """
27
+ import argparse
28
+
29
+ # utilities to import the model weights and config file
30
+ import os
31
+ import pickle as pkl
32
+
33
+ # PyTorch + new model classes
34
+ import torch
35
+ from torch import nn
36
+
37
+ from transformers import AutoTokenizer, MegaConfig, MegaForMaskedLM
38
+
39
+
40
+ # import the EncoderLayer class used to pretrain
41
+ # !! NOTE !! this requires the version of fairseq that is built when you install the Mega source
42
+ try:
43
+ from fairseq.modules.mega_layer import MegaEncoderLayer
44
+ except ImportError:
45
+ raise ImportError("You need to install the version of fairseq from the Mega repo!")
46
+
47
+
48
+ # define the wrapper classes used to train the MLM (see colab notebook below)
49
+ # https://colab.research.google.com/drive/1qfUO6o5HRdxBblWlw058HVyvaEPhPpH8?usp=sharing
50
+ # MegaLM outputs hidden states
51
+ class MegaLM(nn.Module):
52
+ "The base class for our Mega encoder - given input IDs, embed text and return encoder output"
53
+
54
+ def __init__(self, mega_args, depth, vocab_size):
55
+ super().__init__()
56
+ self.mega_args = mega_args
57
+ self.embedding_layer = nn.Embedding(vocab_size, self.mega_args.encoder_embed_dim)
58
+ self.encoders = nn.ModuleList([MegaEncoderLayer(self.mega_args) for _ in range(depth)])
59
+ self.depth = depth
60
+
61
+ def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
62
+ """
63
+ Code for a forward pass - expects input_ids and attention_mask to come from a Hugging Face tokenizer as PyTorch
64
+ tensors, and returns the encoder hidden states of size (batch, time, hidden size) when `batch_first=True`
65
+
66
+ Other options:
67
+ - batch_first: boolean indicating whether the batch dimension is first in input_ids (default: True, which
68
+ aligns with the HF tokenizer behavior)
69
+ - ignore_mask_value: the value in attention_mask that identifies tokens that should be ignored (default: 0,
70
+ which aligns with HF tokenizer)
71
+ """
72
+
73
+ # Mega expects embeddings to be (time, batch, embedding size), but
74
+ # Hugging Face returns tokens as (batch, time)
75
+ if batch_first:
76
+ input_ids = input_ids.T
77
+
78
+ # to make things more confusing, Mega expects the attention mask to
79
+ # be (batch, time), but with values of 0 (normal token) and 1 (ignore token)
80
+ # which is the opposite of what HF returns
81
+ if ignore_mask_value == 0:
82
+ attention_mask = 1 - attention_mask
83
+
84
+ # get token embeddings from IDs
85
+ embeds = self.embedding_layer(input_ids)
86
+
87
+ # pass through the Mega layers
88
+ # input is (time, batch, encoder dim) and output is the same
89
+ for encoder in self.encoders:
90
+ embeds = encoder(embeds, attention_mask)
91
+
92
+ # return according to the shape specified
93
+ if batch_first:
94
+ # (T, B, H) --> (B, T, H)
95
+ return torch.transpose(embeds, 0, 1)
96
+ else:
97
+ return embeds
98
+
99
+
100
+ # renamed from MegaForMaskedLM to avoid confusion with new module
101
+ class OriginalMegaForMaskedLM(nn.Module):
102
+ "A wrapper class for doing masked language modeling with Mega"
103
+
104
+ def __init__(self, mega_args, depth, vocab_size):
105
+ super().__init__()
106
+ self.mega = MegaLM(mega_args, depth, vocab_size)
107
+ self.mlm_head = nn.Linear(mega_args.encoder_embed_dim, vocab_size)
108
+ self.dropout = nn.Dropout(p=0.1)
109
+
110
+ def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0):
111
+ """
112
+ Perform a forward pass through the Mega encoder and the masked LM head. Returns logits for each vocabulary
113
+ entry.
114
+
115
+ If `batch_first` (default to align with Hugging Face tokenizer behavior), output will have the shape (Batch
116
+ size, Sequence length, Vocab size); otherwise (S, B, V)
117
+ """
118
+ encoder_output = self.mega(input_ids, attention_mask, batch_first, ignore_mask_value)
119
+ return self.mlm_head(self.dropout(encoder_output))
120
+
121
+
122
+ # code to convert the checkpoint located in the user-specified location
123
+ def convert_checkpoint_to_huggingface(pretrained_checkpoint_path, output_path, includes_tokenizer):
124
+ with open(os.path.join(pretrained_checkpoint_path, "model_args.pkl"), "rb") as f:
125
+ mega_original_args = pkl.load(f)
126
+
127
+ # load the original encoder
128
+ original_mlm = OriginalMegaForMaskedLM(**mega_original_args).eval()
129
+
130
+ # load its weights
131
+ print(
132
+ "Original Mega encoder:",
133
+ original_mlm.mega.load_state_dict(
134
+ torch.load(os.path.join(pretrained_checkpoint_path, "encoder_weights.pt"), map_location="cpu")
135
+ ),
136
+ )
137
+ print(
138
+ "Original Mega MLM layer:",
139
+ original_mlm.mlm_head.load_state_dict(
140
+ torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu")
141
+ ),
142
+ )
143
+
144
+ # create a new config from the old one
145
+ hf_config = MegaConfig(
146
+ num_hidden_layers=mega_original_args["depth"],
147
+ vocab_size=mega_original_args["vocab_size"],
148
+ hidden_size=mega_original_args["mega_args"].encoder_embed_dim,
149
+ shared_representation_size=mega_original_args["mega_args"].encoder_z_dim,
150
+ intermediate_size=mega_original_args["mega_args"].encoder_hidden_dim,
151
+ ema_projection_size=mega_original_args["mega_args"].encoder_n_dim,
152
+ dropout_prob=mega_original_args["mega_args"].dropout,
153
+ attention_probs_dropout_prob=mega_original_args["mega_args"].attention_dropout,
154
+ hidden_dropout_prob=mega_original_args["mega_args"].hidden_dropout,
155
+ activation=mega_original_args["mega_args"].activation_fn,
156
+ attention_activation=mega_original_args["mega_args"].attention_activation_fn,
157
+ bidirectional=mega_original_args["mega_args"].bidirectional,
158
+ use_chunking=mega_original_args["mega_args"].encoder_chunk_size > 0,
159
+ chunk_size=mega_original_args["mega_args"].encoder_chunk_size,
160
+ truncation=mega_original_args["mega_args"].truncation_length,
161
+ normalization_type=mega_original_args["mega_args"].normalization_type,
162
+ normalize_before_mega=True,
163
+ norm_affine=True,
164
+ use_feature_dropout=mega_original_args["mega_args"].feature_dropout,
165
+ relative_positional_bias=mega_original_args["mega_args"].rel_pos_bias,
166
+ max_positions=mega_original_args["mega_args"].max_source_positions,
167
+ nffn_hidden_size=mega_original_args["mega_args"].encoder_ffn_embed_dim,
168
+ normalize_before_ffn=mega_original_args["mega_args"].normalize_before,
169
+ # new arguments added for HF implementation
170
+ nffn_activation_dropout_prob=0.0,
171
+ add_token_type_embeddings=False,
172
+ add_lm_hidden_dense_layer=False,
173
+ )
174
+
175
+ hf_mlm = MegaForMaskedLM(hf_config).eval()
176
+
177
+ # the original checkpoint just uses nn.Embedding for the word embeddings
178
+ # we use a wrapper module for embeddings to add support for positional embeddings
179
+ hf_mlm.mega.embedding_layer.word_embeddings.weight = original_mlm.mega.embedding_layer.weight
180
+
181
+ # modify the state dictionary of the original checkpoint to account for naming issues in the Hugging Face
182
+ # ecosystem -- any names containing "beta" or "gamma" aren't safe to use and are renamed upon _load_pretrained,
183
+ # also renaming previously confusing parameter names
184
+ original_state_dict = original_mlm.mega.encoders.state_dict()
185
+ updated_keys = {}
186
+ for module_name in original_state_dict.keys():
187
+ new_module_name = None
188
+ # have to handle gamma, beta, and alpha differently due to their use
189
+ # in multiple modules within the original repository;
190
+ # beta is used in EMA, MovingAverageGatedAttention, and RotaryRelativePositionalBias, and must be renamed due to flax/tf weights
191
+ # the EMA sublayer was renamed from "move" to "ema_gate" for readability, so that is also done here
192
+ if "beta" in module_name:
193
+ # EMA sub-layers were always called "move" in the original repo
194
+ if "move.beta" in module_name:
195
+ new_module_name = module_name.replace("move.beta", "ema_gate.ema_expansion_matrix")
196
+ elif "mega_layer.beta" in module_name:
197
+ new_module_name = module_name.replace("beta", "qk_bias")
198
+ else:
199
+ new_module_name = module_name.replace("beta", "b_param")
200
+ # gamma is used in EMA and MovingAverageGatedAttention, and must be renamed due to flax/tf weights
201
+ elif "gamma" in module_name:
202
+ if "move.gamma" in module_name:
203
+ new_module_name = module_name.replace("move.gamma", "ema_gate.kernel_projection_matrix")
204
+ elif "mega_layer.gamma" in module_name:
205
+ new_module_name = module_name.replace("gamma", "qk_weight")
206
+ else:
207
+ new_module_name = module_name.replace("gamma", "g_param")
208
+ # alpha is used in EMA and positional bias; renaming to improve readability
209
+ elif "move.alpha" in module_name:
210
+ new_module_name = module_name.replace("move.alpha", "ema_gate.decay_factor")
211
+ # delta is only used in EMA; renaming to improve readability
212
+ elif "move.delta" in module_name:
213
+ new_module_name = module_name.replace("move.delta", "ema_gate.damping_factor")
214
+ # omega is only used in EMA; renaming to improve readability
215
+ elif "omega" in module_name:
216
+ new_module_name = module_name.replace("move.omega", "ema_gate.residual_weight")
217
+
218
+ if new_module_name:
219
+ updated_keys[module_name] = new_module_name
220
+
221
+ if len(updated_keys) != 0:
222
+ print(f"Renaming these keys: {updated_keys.keys()}")
223
+ else:
224
+ print("No need to rename state dict entries")
225
+ for old, new in updated_keys.items():
226
+ original_state_dict[new] = original_state_dict.pop(old)
227
+
228
+ # now attempt to load the state dictionary with updated names
229
+ # note that we now call it `mega.layers` instead of `mega.encoders` due to hugging face style
230
+ print("HF Mega encoder:", hf_mlm.mega.layers.load_state_dict(original_state_dict))
231
+
232
+ # load the MLM head weights directly
233
+ print(
234
+ "HF Mega MLM layer:",
235
+ hf_mlm.mlm_head.load_state_dict(
236
+ torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu")
237
+ ),
238
+ )
239
+
240
+ # test on a randomly generated input sequence
241
+ input_ids = torch.randint(0, hf_config.vocab_size, size=(4, 256))
242
+ input_mask = torch.ones_like(input_ids)
243
+ # mask a few tokens to make sure masking is applied appropriately :)
244
+ input_mask[:, -10:] = 0
245
+
246
+ # run forward passes
247
+ original_output = original_mlm(input_ids, input_mask, batch_first=True, ignore_mask_value=0)
248
+ hf_output = hf_mlm(input_ids, input_mask)[0]
249
+
250
+ # print shapes and diff
251
+ print(f"original output {original_output.shape}")
252
+ print(f"hf output {hf_output.shape}")
253
+ print(f"max diff: {(original_output - hf_output).max()}") # 0.0
254
+ success = torch.allclose(original_output, hf_output, atol=1e-3)
255
+
256
+ if success:
257
+ print("Yay!")
258
+ hf_mlm.save_pretrained(output_path)
259
+ else:
260
+ raise RuntimeError(f"Something's broken :(\nOriginal:\n{original_output}\n\nHF\n{hf_output}\n{hf_mlm}")
261
+
262
+ if includes_tokenizer:
263
+ print("Transferring tokenizer")
264
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_checkpoint_path)
265
+ tokenizer.save_pretrained(output_path)
266
+
267
+
268
+ if __name__ == "__main__":
269
+ parser = argparse.ArgumentParser()
270
+
271
+ parser.add_argument(
272
+ "--pretrained_checkpoint_path",
273
+ default=None,
274
+ type=str,
275
+ required=True,
276
+ help="Point to the directory containing your model weights using the official Mega repo",
277
+ )
278
+
279
+ parser.add_argument(
280
+ "--output_path", default=None, type=str, required=True, help="Location to save the Hugging Face version"
281
+ )
282
+
283
+ parser.add_argument(
284
+ "--includes_tokenizer",
285
+ action="store_true",
286
+ help="Use this flag if there is a Hugging Face tokenizer in the original checkpoint repo",
287
+ )
288
+
289
+ args = parser.parse_args()
290
+
291
+ convert_checkpoint_to_huggingface(args.pretrained_checkpoint_path, args.output_path, args.includes_tokenizer)
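A hedged usage sketch for this converter; the paths below are placeholders, not real checkpoints, and the original checkpoint directory is assumed to contain the `model_args.pkl`, `encoder_weights.pt` and `mlm_head_weights.pt` files read by `convert_checkpoint_to_huggingface()` above.

```python
# Hedged sketch -- placeholder paths only.
#
#   python convert_mega_original_pytorch_checkpoint_to_pytorch.py \
#       --pretrained_checkpoint_path /path/to/original_mega_checkpoint \
#       --output_path /path/to/hf_mega \
#       --includes_tokenizer
#
from transformers import AutoTokenizer, MegaForMaskedLM

model = MegaForMaskedLM.from_pretrained("/path/to/hf_mega")    # weights written by save_pretrained() above
tokenizer = AutoTokenizer.from_pretrained("/path/to/hf_mega")  # present only if --includes_tokenizer was passed
```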
llmeval-env/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__init__.py ADDED
@@ -0,0 +1,68 @@
1
+ # flake8: noqa
2
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
3
+ # module, but to preserve other warnings. So, don't check this module at all.
4
+
5
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
6
+ #
7
+ # Licensed under the Apache License, Version 2.0 (the "License");
8
+ # you may not use this file except in compliance with the License.
9
+ # You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing, software
14
+ # distributed under the License is distributed on an "AS IS" BASIS,
15
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ # See the License for the specific language governing permissions and
17
+ # limitations under the License.
18
+ from typing import TYPE_CHECKING
19
+
20
+ # rely on isort to merge the imports
21
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
22
+
23
+
24
+ _import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
25
+
26
+ try:
27
+ if not is_torch_available():
28
+ raise OptionalDependencyNotAvailable()
29
+ except OptionalDependencyNotAvailable:
30
+ pass
31
+ else:
32
+ _import_structure["modeling_mra"] = [
33
+ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
34
+ "MraForMaskedLM",
35
+ "MraForMultipleChoice",
36
+ "MraForQuestionAnswering",
37
+ "MraForSequenceClassification",
38
+ "MraForTokenClassification",
39
+ "MraLayer",
40
+ "MraModel",
41
+ "MraPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_mra import (
55
+ MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ MraForMaskedLM,
57
+ MraForMultipleChoice,
58
+ MraForQuestionAnswering,
59
+ MraForSequenceClassification,
60
+ MraForTokenClassification,
61
+ MraLayer,
62
+ MraModel,
63
+ MraPreTrainedModel,
64
+ )
65
+ else:
66
+ import sys
67
+
68
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
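As a hedged illustration of what the `_LazyModule` indirection above buys: the names exported here resolve to the real classes only when first accessed, so `import transformers` stays cheap and `modeling_mra` is only loaded on demand.

```python
# Illustrative sketch (assumes torch is installed); the config values are arbitrary small ones.
from transformers import MraConfig, MraModel  # resolved lazily through _LazyModule

config = MraConfig(num_hidden_layers=2, hidden_size=256, num_attention_heads=4, intermediate_size=512)
model = MraModel(config)  # touching MraModel is what actually triggers the modeling_mra import
print(model.config.num_hidden_layers)  # 2
```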
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/configuration_mra.cpython-310.pyc ADDED
Binary file (5.72 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/convert_mra_pytorch_to_pytorch.cpython-310.pyc ADDED
Binary file (2.83 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/__pycache__/modeling_mra.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/configuration_mra.py ADDED
@@ -0,0 +1,137 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MRA model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class MraConfig(PretrainedConfig):
28
+ r"""
29
+ This is the configuration class to store the configuration of a [`MraModel`]. It is used to instantiate an MRA
30
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
31
+ defaults will yield a similar configuration to that of the Mra
32
+ [uw-madison/mra-base-512-4](https://huggingface.co/uw-madison/mra-base-512-4) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50265):
40
+ Vocabulary size of the Mra model. Defines the number of different tokens that can be represented by the
41
+ `input_ids` passed when calling [`MraModel`].
42
+ hidden_size (`int`, *optional*, defaults to 768):
43
+ Dimension of the encoder layers and the pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 12):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 3072):
49
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
50
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
51
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
52
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
53
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
54
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
55
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout ratio for the attention probabilities.
57
+ max_position_embeddings (`int`, *optional*, defaults to 512):
58
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
59
+ just in case (e.g., 512 or 1024 or 2048).
60
+ type_vocab_size (`int`, *optional*, defaults to 1):
61
+ The vocabulary size of the `token_type_ids` passed when calling [`MraModel`].
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
65
+ The epsilon used by the layer normalization layers.
66
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
67
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`.
68
+ block_per_row (`int`, *optional*, defaults to 4):
69
+ Used to set the budget for the high resolution scale.
70
+ approx_mode (`str`, *optional*, defaults to `"full"`):
71
+ Controls whether both low and high resolution approximations are used. Set to `"full"` for both low and
72
+ high resolution and `"sparse"` for only low resolution.
73
+ initial_prior_first_n_blocks (`int`, *optional*, defaults to 0):
74
+ The initial number of blocks for which high resolution is used.
75
+ initial_prior_diagonal_n_blocks (`int`, *optional*, defaults to 0):
76
+ The number of diagonal blocks for which high resolution is used.
77
+
78
+ Example:
79
+
80
+ ```python
81
+ >>> from transformers import MraConfig, MraModel
82
+
83
+ >>> # Initializing a Mra uw-madison/mra-base-512-4 style configuration
84
+ >>> configuration = MraConfig()
85
+
86
+ >>> # Initializing a model (with random weights) from the uw-madison/mra-base-512-4 style configuration
87
+ >>> model = MraModel(configuration)
88
+
89
+ >>> # Accessing the model configuration
90
+ >>> configuration = model.config
91
+ ```"""
92
+
93
+ model_type = "mra"
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_size=50265,
98
+ hidden_size=768,
99
+ num_hidden_layers=12,
100
+ num_attention_heads=12,
101
+ intermediate_size=3072,
102
+ hidden_act="gelu",
103
+ hidden_dropout_prob=0.1,
104
+ attention_probs_dropout_prob=0.1,
105
+ max_position_embeddings=512,
106
+ type_vocab_size=1,
107
+ initializer_range=0.02,
108
+ layer_norm_eps=1e-5,
109
+ position_embedding_type="absolute",
110
+ block_per_row=4,
111
+ approx_mode="full",
112
+ initial_prior_first_n_blocks=0,
113
+ initial_prior_diagonal_n_blocks=0,
114
+ pad_token_id=1,
115
+ bos_token_id=0,
116
+ eos_token_id=2,
117
+ **kwargs,
118
+ ):
119
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
120
+
121
+ self.vocab_size = vocab_size
122
+ self.max_position_embeddings = max_position_embeddings
123
+ self.hidden_size = hidden_size
124
+ self.num_hidden_layers = num_hidden_layers
125
+ self.num_attention_heads = num_attention_heads
126
+ self.intermediate_size = intermediate_size
127
+ self.hidden_act = hidden_act
128
+ self.hidden_dropout_prob = hidden_dropout_prob
129
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
130
+ self.initializer_range = initializer_range
131
+ self.type_vocab_size = type_vocab_size
132
+ self.layer_norm_eps = layer_norm_eps
133
+ self.position_embedding_type = position_embedding_type
134
+ self.block_per_row = block_per_row
135
+ self.approx_mode = approx_mode
136
+ self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
137
+ self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
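Beyond the BERT-style arguments, `approx_mode`, `block_per_row` and the two `initial_prior_*` knobs are what distinguish this config; a hedged example of a non-default variant (values are illustrative, not tuned recommendations):

```python
from transformers import MraConfig

# Illustrative values only -- not recommended defaults.
sparse_config = MraConfig(
    approx_mode="sparse",               # see the approx_mode description in the docstring above
    block_per_row=8,                    # larger per-row block budget
    initial_prior_diagonal_n_blocks=3,  # bias the selected blocks toward the diagonal
)
print(sparse_config.approx_mode)  # "sparse"
```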
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/convert_mra_pytorch_to_pytorch.py ADDED
@@ -0,0 +1,110 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention"""
16
+
17
+ import argparse
18
+
19
+ import torch
20
+
21
+ from transformers import MraConfig, MraForMaskedLM
22
+
23
+
24
+ def rename_key(orig_key):
25
+ if "model" in orig_key:
26
+ orig_key = orig_key.replace("model.", "")
27
+ if "norm1" in orig_key:
28
+ orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
29
+ if "norm2" in orig_key:
30
+ orig_key = orig_key.replace("norm2", "output.LayerNorm")
31
+ if "norm" in orig_key:
32
+ orig_key = orig_key.replace("norm", "LayerNorm")
33
+ if "transformer" in orig_key:
34
+ layer_num = orig_key.split(".")[0].split("_")[-1]
35
+ orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
36
+ if "mha.attn" in orig_key:
37
+ orig_key = orig_key.replace("mha.attn", "attention.self")
38
+ if "mha" in orig_key:
39
+ orig_key = orig_key.replace("mha", "attention")
40
+ if "W_q" in orig_key:
41
+ orig_key = orig_key.replace("W_q", "self.query")
42
+ if "W_k" in orig_key:
43
+ orig_key = orig_key.replace("W_k", "self.key")
44
+ if "W_v" in orig_key:
45
+ orig_key = orig_key.replace("W_v", "self.value")
46
+ if "ff.0" in orig_key:
47
+ orig_key = orig_key.replace("ff.0", "intermediate.dense")
48
+ if "ff.2" in orig_key:
49
+ orig_key = orig_key.replace("ff.2", "output.dense")
50
+ if "ff" in orig_key:
51
+ orig_key = orig_key.replace("ff", "output.dense")
52
+ if "mlm_class" in orig_key:
53
+ orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
54
+ if "mlm" in orig_key:
55
+ orig_key = orig_key.replace("mlm", "cls.predictions.transform")
56
+ if "backbone.backbone.encoders" in orig_key:
57
+ orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer")
58
+ if "cls" not in orig_key:
59
+ orig_key = "mra." + orig_key
60
+
61
+ return orig_key
62
+
63
+
64
+ def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
65
+ for key in orig_state_dict.copy().keys():
66
+ val = orig_state_dict.pop(key)
67
+
68
+ if ("pooler" in key) or ("sen_class" in key):
69
+ continue
70
+ else:
71
+ orig_state_dict[rename_key(key)] = val
72
+
73
+ orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
74
+ orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
75
+
76
+ return orig_state_dict
77
+
78
+
79
+ def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path):
80
+ orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
81
+ config = MraConfig.from_json_file(mra_config_file)
82
+ model = MraForMaskedLM(config)
83
+
84
+ new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
85
+
86
+ print(model.load_state_dict(new_state_dict))
87
+ model.eval()
88
+ model.save_pretrained(pytorch_dump_path)
89
+
90
+ print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
91
+
92
+
93
+ if __name__ == "__main__":
94
+ parser = argparse.ArgumentParser()
95
+ # Required parameters
96
+ parser.add_argument(
97
+ "--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint."
98
+ )
99
+ parser.add_argument(
100
+ "--config_file",
101
+ default=None,
102
+ type=str,
103
+ required=True,
104
+ help="The json file for Mra model config.",
105
+ )
106
+ parser.add_argument(
107
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
108
+ )
109
+ args = parser.parse_args()
110
+ convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
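A hedged end-to-end sketch of running this converter; the paths are placeholders for a checkpoint and JSON config produced with the repository referenced in the module docstring.

```python
# Placeholder paths -- substitute an actual checkpoint/config pair from the original MRA codebase.
#
#   python convert_mra_pytorch_to_pytorch.py \
#       --pytorch_model_path /path/to/mra_checkpoint.pt \
#       --config_file /path/to/mra_config.json \
#       --pytorch_dump_path /path/to/hf_mra
#
from transformers import MraForMaskedLM

model = MraForMaskedLM.from_pretrained("/path/to/hf_mra")  # reload the converted weights
model.eval()
```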
llmeval-env/lib/python3.10/site-packages/transformers/models/mra/modeling_mra.py ADDED
@@ -0,0 +1,1480 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch MRA model."""
16
+
17
+
18
+ import math
19
+ from pathlib import Path
20
+ from typing import Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+ from torch.utils.cpp_extension import load
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithCrossAttentions,
31
+ MaskedLMOutput,
32
+ MultipleChoiceModelOutput,
33
+ QuestionAnsweringModelOutput,
34
+ SequenceClassifierOutput,
35
+ TokenClassifierOutput,
36
+ )
37
+ from ...modeling_utils import PreTrainedModel
38
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
39
+ from ...utils import (
40
+ add_code_sample_docstrings,
41
+ add_start_docstrings,
42
+ add_start_docstrings_to_model_forward,
43
+ is_ninja_available,
44
+ is_torch_cuda_available,
45
+ logging,
46
+ )
47
+ from .configuration_mra import MraConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "uw-madison/mra-base-512-4"
53
+ _CONFIG_FOR_DOC = "MraConfig"
54
+ _TOKENIZER_FOR_DOC = "AutoTokenizer"
55
+
56
+
57
+ from ..deprecated._archive_maps import MRA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ mra_cuda_kernel = None
61
+
62
+
63
+ def load_cuda_kernels():
64
+ global mra_cuda_kernel
65
+ src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "mra"
66
+
67
+ def append_root(files):
68
+ return [src_folder / file for file in files]
69
+
70
+ src_files = append_root(["cuda_kernel.cu", "cuda_launch.cu", "torch_extension.cpp"])
71
+
72
+ mra_cuda_kernel = load("cuda_kernel", src_files, verbose=True)
73
+
74
+
75
+ def sparse_max(sparse_qk_prod, indices, query_num_block, key_num_block):
76
+ """
77
+ Computes maximum values for softmax stability.
78
+ """
79
+ if len(sparse_qk_prod.size()) != 4:
80
+ raise ValueError("sparse_qk_prod must be a 4-dimensional tensor.")
81
+
82
+ if len(indices.size()) != 2:
83
+ raise ValueError("indices must be a 2-dimensional tensor.")
84
+
85
+ if sparse_qk_prod.size(2) != 32:
86
+ raise ValueError("The size of the second dimension of sparse_qk_prod must be 32.")
87
+
88
+ if sparse_qk_prod.size(3) != 32:
89
+ raise ValueError("The size of the third dimension of sparse_qk_prod must be 32.")
90
+
91
+ index_vals = sparse_qk_prod.max(dim=-2).values.transpose(-1, -2)
92
+ index_vals = index_vals.contiguous()
93
+
94
+ indices = indices.int()
95
+ indices = indices.contiguous()
96
+
97
+ max_vals, max_vals_scatter = mra_cuda_kernel.index_max(index_vals, indices, query_num_block, key_num_block)
98
+ max_vals_scatter = max_vals_scatter.transpose(-1, -2)[:, :, None, :]
99
+
100
+ return max_vals, max_vals_scatter
101
+
102
+
103
+ def sparse_mask(mask, indices, block_size=32):
104
+ """
105
+ Converts attention mask to a sparse mask for high resolution logits.
106
+ """
107
+ if len(mask.size()) != 2:
108
+ raise ValueError("mask must be a 2-dimensional tensor.")
109
+
110
+ if len(indices.size()) != 2:
111
+ raise ValueError("indices must be a 2-dimensional tensor.")
112
+
113
+ if mask.shape[0] != indices.shape[0]:
114
+ raise ValueError("mask and indices must have the same size in the zero-th dimension.")
115
+
116
+ batch_size, seq_len = mask.shape
117
+ num_block = seq_len // block_size
118
+
119
+ batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device)
120
+ mask = mask.reshape(batch_size, num_block, block_size)
121
+ mask = mask[batch_idx[:, None], (indices % num_block).long(), :]
122
+
123
+ return mask
124
+
125
+
126
+ def mm_to_sparse(dense_query, dense_key, indices, block_size=32):
127
+ """
128
+ Performs Sampled Dense Matrix Multiplication.
129
+ """
130
+ batch_size, query_size, dim = dense_query.size()
131
+ _, key_size, dim = dense_key.size()
132
+
133
+ if query_size % block_size != 0:
134
+ raise ValueError("query_size (size of first dimension of dense_query) must be divisible by block_size.")
135
+
136
+ if key_size % block_size != 0:
137
+ raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.")
138
+
139
+ dense_query = dense_query.reshape(batch_size, query_size // block_size, block_size, dim).transpose(-1, -2)
140
+ dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2)
141
+
142
+ if len(dense_query.size()) != 4:
143
+ raise ValueError("dense_query must be a 4-dimensional tensor.")
144
+
145
+ if len(dense_key.size()) != 4:
146
+ raise ValueError("dense_key must be a 4-dimensional tensor.")
147
+
148
+ if len(indices.size()) != 2:
149
+ raise ValueError("indices must be a 2-dimensional tensor.")
150
+
151
+ if dense_query.size(3) != 32:
152
+ raise ValueError("The third dimension of dense_query must be 32.")
153
+
154
+ if dense_key.size(3) != 32:
155
+ raise ValueError("The third dimension of dense_key must be 32.")
156
+
157
+ dense_query = dense_query.contiguous()
158
+ dense_key = dense_key.contiguous()
159
+
160
+ indices = indices.int()
161
+ indices = indices.contiguous()
162
+
163
+ return mra_cuda_kernel.mm_to_sparse(dense_query, dense_key, indices.int())
164
+
165
+
166
+ def sparse_dense_mm(sparse_query, indices, dense_key, query_num_block, block_size=32):
167
+ """
168
+ Performs matrix multiplication of a sparse matrix with a dense matrix.
169
+ """
170
+ batch_size, key_size, dim = dense_key.size()
171
+
172
+ if key_size % block_size != 0:
173
+ raise ValueError("key_size (size of first dimension of dense_key) must be divisible by block_size.")
174
+
175
+ if sparse_query.size(2) != block_size:
176
+ raise ValueError("The size of the second dimension of sparse_query must be equal to the block_size.")
177
+
178
+ if sparse_query.size(3) != block_size:
179
+ raise ValueError("The size of the third dimension of sparse_query must be equal to the block_size.")
180
+
181
+ dense_key = dense_key.reshape(batch_size, key_size // block_size, block_size, dim).transpose(-1, -2)
182
+
183
+ if len(sparse_query.size()) != 4:
184
+ raise ValueError("sparse_query must be a 4-dimensional tensor.")
185
+
186
+ if len(dense_key.size()) != 4:
187
+ raise ValueError("dense_key must be a 4-dimensional tensor.")
188
+
189
+ if len(indices.size()) != 2:
190
+ raise ValueError("indices must be a 2-dimensional tensor.")
191
+
192
+ if dense_key.size(3) != 32:
193
+ raise ValueError("The size of the third dimension of dense_key must be 32.")
194
+
195
+ sparse_query = sparse_query.contiguous()
196
+
197
+ indices = indices.int()
198
+ indices = indices.contiguous()
199
+ dense_key = dense_key.contiguous()
200
+
201
+ dense_qk_prod = mra_cuda_kernel.sparse_dense_mm(sparse_query, indices, dense_key, query_num_block)
202
+ dense_qk_prod = dense_qk_prod.transpose(-1, -2).reshape(batch_size, query_num_block * block_size, dim)
203
+ return dense_qk_prod
204
+
205
+
206
+ def transpose_indices(indices, dim_1_block, dim_2_block):
207
+ return ((indices % dim_2_block) * dim_1_block + torch.div(indices, dim_2_block, rounding_mode="floor")).long()
208
+
209
+
210
+ class MraSampledDenseMatMul(torch.autograd.Function):
211
+ @staticmethod
212
+ def forward(ctx, dense_query, dense_key, indices, block_size):
213
+ sparse_qk_prod = mm_to_sparse(dense_query, dense_key, indices, block_size)
214
+ ctx.save_for_backward(dense_query, dense_key, indices)
215
+ ctx.block_size = block_size
216
+ return sparse_qk_prod
217
+
218
+ @staticmethod
219
+ def backward(ctx, grad):
220
+ dense_query, dense_key, indices = ctx.saved_tensors
221
+ block_size = ctx.block_size
222
+ query_num_block = dense_query.size(1) // block_size
223
+ key_num_block = dense_key.size(1) // block_size
224
+ indices_T = transpose_indices(indices, query_num_block, key_num_block)
225
+ grad_key = sparse_dense_mm(grad.transpose(-1, -2), indices_T, dense_query, key_num_block)
226
+ grad_query = sparse_dense_mm(grad, indices, dense_key, query_num_block)
227
+ return grad_query, grad_key, None, None
228
+
229
+ @staticmethod
230
+ def operator_call(dense_query, dense_key, indices, block_size=32):
231
+ return MraSampledDenseMatMul.apply(dense_query, dense_key, indices, block_size)
232
+
233
+
234
+ class MraSparseDenseMatMul(torch.autograd.Function):
235
+ @staticmethod
236
+ def forward(ctx, sparse_query, indices, dense_key, query_num_block):
237
+ sparse_qk_prod = sparse_dense_mm(sparse_query, indices, dense_key, query_num_block)
238
+ ctx.save_for_backward(sparse_query, indices, dense_key)
239
+ ctx.query_num_block = query_num_block
240
+ return sparse_qk_prod
241
+
242
+ @staticmethod
243
+ def backward(ctx, grad):
244
+ sparse_query, indices, dense_key = ctx.saved_tensors
245
+ query_num_block = ctx.query_num_block
246
+ key_num_block = dense_key.size(1) // sparse_query.size(-1)
247
+ indices_T = transpose_indices(indices, query_num_block, key_num_block)
248
+ grad_key = sparse_dense_mm(sparse_query.transpose(-1, -2), indices_T, grad, key_num_block)
249
+ grad_query = mm_to_sparse(grad, dense_key, indices)
250
+ return grad_query, None, grad_key, None
251
+
252
+ @staticmethod
253
+ def operator_call(sparse_query, indices, dense_key, query_num_block):
254
+ return MraSparseDenseMatMul.apply(sparse_query, indices, dense_key, query_num_block)
255
+
256
+
257
+ class MraReduceSum:
258
+ @staticmethod
259
+ def operator_call(sparse_query, indices, query_num_block, key_num_block):
260
+ batch_size, num_block, block_size, _ = sparse_query.size()
261
+
262
+ if len(sparse_query.size()) != 4:
263
+ raise ValueError("sparse_query must be a 4-dimensional tensor.")
264
+
265
+ if len(indices.size()) != 2:
266
+ raise ValueError("indices must be a 2-dimensional tensor.")
267
+
268
+ _, _, block_size, _ = sparse_query.size()
269
+ batch_size, num_block = indices.size()
270
+
271
+ sparse_query = sparse_query.sum(dim=2).reshape(batch_size * num_block, block_size)
272
+
273
+ batch_idx = torch.arange(indices.size(0), dtype=torch.long, device=indices.device)
274
+ global_idxes = (
275
+ torch.div(indices, key_num_block, rounding_mode="floor").long() + batch_idx[:, None] * query_num_block
276
+ ).reshape(batch_size * num_block)
277
+ temp = torch.zeros(
278
+ (batch_size * query_num_block, block_size), dtype=sparse_query.dtype, device=sparse_query.device
279
+ )
280
+ output = temp.index_add(0, global_idxes, sparse_query).reshape(batch_size, query_num_block, block_size)
281
+
282
+ output = output.reshape(batch_size, query_num_block * block_size)
283
+ return output
284
+
285
+
286
+ def get_low_resolution_logit(query, key, block_size, mask=None, value=None):
287
+ """
288
+ Compute low resolution approximation.
289
+ """
290
+ batch_size, seq_len, head_dim = query.size()
291
+
292
+ num_block_per_row = seq_len // block_size
293
+
294
+ value_hat = None
295
+ if mask is not None:
296
+ token_count = mask.reshape(batch_size, num_block_per_row, block_size).sum(dim=-1)
297
+ query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
298
+ token_count[:, :, None] + 1e-6
299
+ )
300
+ key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
301
+ token_count[:, :, None] + 1e-6
302
+ )
303
+ if value is not None:
304
+ value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).sum(dim=-2) / (
305
+ token_count[:, :, None] + 1e-6
306
+ )
307
+ else:
308
+ token_count = block_size * torch.ones(batch_size, num_block_per_row, dtype=torch.float, device=query.device)
309
+ query_hat = query.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
310
+ key_hat = key.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
311
+ if value is not None:
312
+ value_hat = value.reshape(batch_size, num_block_per_row, block_size, head_dim).mean(dim=-2)
313
+
314
+ low_resolution_logit = torch.matmul(query_hat, key_hat.transpose(-1, -2)) / math.sqrt(head_dim)
315
+
316
+ low_resolution_logit_row_max = low_resolution_logit.max(dim=-1, keepdims=True).values
317
+
318
+ if mask is not None:
319
+ low_resolution_logit = (
320
+ low_resolution_logit - 1e4 * ((token_count[:, None, :] * token_count[:, :, None]) < 0.5).float()
321
+ )
322
+
323
+ return low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat
324
+
325
+
326
+ def get_block_idxes(
327
+ low_resolution_logit, num_blocks, approx_mode, initial_prior_first_n_blocks, initial_prior_diagonal_n_blocks
328
+ ):
329
+ """
330
+ Compute the indices of the subset of components to be used in the approximation.
331
+ """
332
+ batch_size, total_blocks_per_row, _ = low_resolution_logit.shape
333
+
334
+ if initial_prior_diagonal_n_blocks > 0:
335
+ offset = initial_prior_diagonal_n_blocks // 2
336
+ temp_mask = torch.ones(total_blocks_per_row, total_blocks_per_row, device=low_resolution_logit.device)
337
+ diagonal_mask = torch.tril(torch.triu(temp_mask, diagonal=-offset), diagonal=offset)
338
+ low_resolution_logit = low_resolution_logit + diagonal_mask[None, :, :] * 5e3
339
+
340
+ if initial_prior_first_n_blocks > 0:
341
+ low_resolution_logit[:, :initial_prior_first_n_blocks, :] = (
342
+ low_resolution_logit[:, :initial_prior_first_n_blocks, :] + 5e3
343
+ )
344
+ low_resolution_logit[:, :, :initial_prior_first_n_blocks] = (
345
+ low_resolution_logit[:, :, :initial_prior_first_n_blocks] + 5e3
346
+ )
347
+
348
+ top_k_vals = torch.topk(
349
+ low_resolution_logit.reshape(batch_size, -1), num_blocks, dim=-1, largest=True, sorted=False
350
+ )
351
+ indices = top_k_vals.indices
352
+
353
+ if approx_mode == "full":
354
+ threshold = top_k_vals.values.min(dim=-1).values
355
+ high_resolution_mask = (low_resolution_logit >= threshold[:, None, None]).float()
356
+ elif approx_mode == "sparse":
357
+ high_resolution_mask = None
358
+ else:
359
+ raise ValueError(f"{approx_mode} is not a valid approx_mode value.")
360
+
361
+ return indices, high_resolution_mask
362
+
363
+
364
+ def mra2_attention(
365
+ query,
366
+ key,
367
+ value,
368
+ mask,
369
+ num_blocks,
370
+ approx_mode,
371
+ block_size=32,
372
+ initial_prior_first_n_blocks=0,
373
+ initial_prior_diagonal_n_blocks=0,
374
+ ):
375
+ """
376
+ Use Mra to approximate self-attention.
377
+ """
378
+ if mra_cuda_kernel is None:
379
+ return torch.zeros_like(query).requires_grad_()
380
+
381
+ batch_size, num_head, seq_len, head_dim = query.size()
382
+ meta_batch = batch_size * num_head
383
+
384
+ if seq_len % block_size != 0:
385
+ raise ValueError("sequence length must be divisible by the block_size.")
386
+
387
+ num_block_per_row = seq_len // block_size
388
+
389
+ query = query.reshape(meta_batch, seq_len, head_dim)
390
+ key = key.reshape(meta_batch, seq_len, head_dim)
391
+ value = value.reshape(meta_batch, seq_len, head_dim)
392
+
393
+ if mask is not None:
394
+ query = query * mask[:, :, None]
395
+ key = key * mask[:, :, None]
396
+ value = value * mask[:, :, None]
397
+
398
+ if approx_mode == "full":
399
+ low_resolution_logit, token_count, low_resolution_logit_row_max, value_hat = get_low_resolution_logit(
400
+ query, key, block_size, mask, value
401
+ )
402
+ elif approx_mode == "sparse":
403
+ with torch.no_grad():
404
+ low_resolution_logit, token_count, low_resolution_logit_row_max, _ = get_low_resolution_logit(
405
+ query, key, block_size, mask
406
+ )
407
+ else:
408
+ raise Exception('approx_mode must be "full" or "sparse"')
409
+
410
+ with torch.no_grad():
411
+ low_resolution_logit_normalized = low_resolution_logit - low_resolution_logit_row_max
412
+ indices, high_resolution_mask = get_block_idxes(
413
+ low_resolution_logit_normalized,
414
+ num_blocks,
415
+ approx_mode,
416
+ initial_prior_first_n_blocks,
417
+ initial_prior_diagonal_n_blocks,
418
+ )
419
+
420
+ high_resolution_logit = MraSampledDenseMatMul.operator_call(
421
+ query, key, indices, block_size=block_size
422
+ ) / math.sqrt(head_dim)
423
+ max_vals, max_vals_scatter = sparse_max(high_resolution_logit, indices, num_block_per_row, num_block_per_row)
424
+ high_resolution_logit = high_resolution_logit - max_vals_scatter
425
+ if mask is not None:
426
+ high_resolution_logit = high_resolution_logit - 1e4 * (1 - sparse_mask(mask, indices)[:, :, :, None])
427
+ high_resolution_attn = torch.exp(high_resolution_logit)
428
+ high_resolution_attn_out = MraSparseDenseMatMul.operator_call(
429
+ high_resolution_attn, indices, value, num_block_per_row
430
+ )
431
+ high_resolution_normalizer = MraReduceSum.operator_call(
432
+ high_resolution_attn, indices, num_block_per_row, num_block_per_row
433
+ )
434
+
435
+ if approx_mode == "full":
436
+ low_resolution_attn = (
437
+ torch.exp(low_resolution_logit - low_resolution_logit_row_max - 1e4 * high_resolution_mask)
438
+ * token_count[:, None, :]
439
+ )
440
+
441
+ low_resolution_attn_out = (
442
+ torch.matmul(low_resolution_attn, value_hat)[:, :, None, :]
443
+ .repeat(1, 1, block_size, 1)
444
+ .reshape(meta_batch, seq_len, head_dim)
445
+ )
446
+ low_resolution_normalizer = (
447
+ low_resolution_attn.sum(dim=-1)[:, :, None].repeat(1, 1, block_size).reshape(meta_batch, seq_len)
448
+ )
449
+
450
+ log_correction = low_resolution_logit_row_max.repeat(1, 1, block_size).reshape(meta_batch, seq_len) - max_vals
451
+ if mask is not None:
452
+ log_correction = log_correction * mask
453
+
454
+ low_resolution_corr = torch.exp(log_correction * (log_correction <= 0).float())
455
+ low_resolution_attn_out = low_resolution_attn_out * low_resolution_corr[:, :, None]
456
+ low_resolution_normalizer = low_resolution_normalizer * low_resolution_corr
457
+
458
+ high_resolution_corr = torch.exp(-log_correction * (log_correction > 0).float())
459
+ high_resolution_attn_out = high_resolution_attn_out * high_resolution_corr[:, :, None]
460
+ high_resolution_normalizer = high_resolution_normalizer * high_resolution_corr
461
+
462
+ context_layer = (high_resolution_attn_out + low_resolution_attn_out) / (
463
+ high_resolution_normalizer[:, :, None] + low_resolution_normalizer[:, :, None] + 1e-6
464
+ )
465
+
466
+ elif approx_mode == "sparse":
467
+ context_layer = high_resolution_attn_out / (high_resolution_normalizer[:, :, None] + 1e-6)
468
+ else:
469
+ raise Exception('config.approx_mode must be "full" or "sparse"')
470
+
471
+ if mask is not None:
472
+ context_layer = context_layer * mask[:, :, None]
473
+
474
+ context_layer = context_layer.reshape(batch_size, num_head, seq_len, head_dim)
475
+
476
+ return context_layer
477
+
478
+
479
+ class MraEmbeddings(nn.Module):
480
+ """Construct the embeddings from word, position and token_type embeddings."""
481
+
482
+ def __init__(self, config):
483
+ super().__init__()
484
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
485
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size)
486
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
487
+
488
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
489
+ # any TensorFlow checkpoint file
490
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
491
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
492
+
493
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
494
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2)
495
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
496
+ self.register_buffer(
497
+ "token_type_ids",
498
+ torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
499
+ persistent=False,
500
+ )
501
+
502
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
503
+ if input_ids is not None:
504
+ input_shape = input_ids.size()
505
+ else:
506
+ input_shape = inputs_embeds.size()[:-1]
507
+
508
+ seq_length = input_shape[1]
509
+
510
+ if position_ids is None:
511
+ position_ids = self.position_ids[:, :seq_length]
512
+
513
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
514
+ # when it's auto-generated; the registered buffer helps users when tracing the model without passing token_type_ids and solves
515
+ # issue #5664
516
+ if token_type_ids is None:
517
+ if hasattr(self, "token_type_ids"):
518
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
519
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
520
+ token_type_ids = buffered_token_type_ids_expanded
521
+ else:
522
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
523
+
524
+ if inputs_embeds is None:
525
+ inputs_embeds = self.word_embeddings(input_ids)
526
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
527
+
528
+ embeddings = inputs_embeds + token_type_embeddings
529
+ if self.position_embedding_type == "absolute":
530
+ position_embeddings = self.position_embeddings(position_ids)
531
+ embeddings += position_embeddings
532
+ embeddings = self.LayerNorm(embeddings)
533
+ embeddings = self.dropout(embeddings)
534
+ return embeddings
535
+
536
+
537
+ class MraSelfAttention(nn.Module):
538
+ def __init__(self, config, position_embedding_type=None):
539
+ super().__init__()
540
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
541
+ raise ValueError(
542
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
543
+ f"heads ({config.num_attention_heads})"
544
+ )
545
+
546
+ kernel_loaded = mra_cuda_kernel is not None
547
+ if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded:
548
+ try:
549
+ load_cuda_kernels()
550
+ except Exception as e:
551
+ logger.warning(f"Could not load the custom CUDA kernel for MRA attention: {e}")
552
+
553
+ self.num_attention_heads = config.num_attention_heads
554
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
555
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
556
+
557
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
558
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
559
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
560
+
561
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
562
+ self.position_embedding_type = (
563
+ position_embedding_type if position_embedding_type is not None else config.position_embedding_type
564
+ )
565
+
566
+ self.num_block = (config.max_position_embeddings // 32) * config.block_per_row
567
+ self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 2))
568
+
569
+ self.approx_mode = config.approx_mode
570
+ self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks
571
+ self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks
572
+
573
+ def transpose_for_scores(self, layer):
574
+ new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
575
+ layer = layer.view(*new_layer_shape)
576
+ return layer.permute(0, 2, 1, 3)
577
+
578
+ def forward(self, hidden_states, attention_mask=None):
579
+ mixed_query_layer = self.query(hidden_states)
580
+
581
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
582
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
583
+ query_layer = self.transpose_for_scores(mixed_query_layer)
584
+
585
+ batch_size, num_heads, seq_len, head_dim = query_layer.size()
586
+
587
+ # revert changes made by get_extended_attention_mask
588
+ attention_mask = 1.0 + attention_mask / 10000.0
589
+ attention_mask = (
590
+ attention_mask.squeeze().repeat(1, num_heads, 1).reshape(batch_size * num_heads, seq_len).int()
591
+ )
592
+
593
+ # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs
594
+ # smaller than this are padded with zeros.
595
+ gpu_warp_size = 32
596
+
597
+ if head_dim < gpu_warp_size:
598
+ pad_size = batch_size, num_heads, seq_len, gpu_warp_size - head_dim
599
+
600
+ query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1)
601
+ key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1)
602
+ value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1)
603
+
604
+ context_layer = mra2_attention(
605
+ query_layer.float(),
606
+ key_layer.float(),
607
+ value_layer.float(),
608
+ attention_mask.float(),
609
+ self.num_block,
610
+ approx_mode=self.approx_mode,
611
+ initial_prior_first_n_blocks=self.initial_prior_first_n_blocks,
612
+ initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks,
613
+ )
614
+
615
+ if head_dim < gpu_warp_size:
616
+ context_layer = context_layer[:, :, :, :head_dim]
617
+
618
+ context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim)
619
+
620
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
621
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
622
+ context_layer = context_layer.view(*new_context_layer_shape)
623
+
624
+ outputs = (context_layer,)
625
+
626
+ return outputs
627
+
628
+
629
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
630
+ class MraSelfOutput(nn.Module):
631
+ def __init__(self, config):
632
+ super().__init__()
633
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
634
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
635
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
636
+
637
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
638
+ hidden_states = self.dense(hidden_states)
639
+ hidden_states = self.dropout(hidden_states)
640
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
641
+ return hidden_states
642
+
643
+
644
+ class MraAttention(nn.Module):
645
+ def __init__(self, config, position_embedding_type=None):
646
+ super().__init__()
647
+ self.self = MraSelfAttention(config, position_embedding_type=position_embedding_type)
648
+ self.output = MraSelfOutput(config)
649
+ self.pruned_heads = set()
650
+
651
+ def prune_heads(self, heads):
652
+ if len(heads) == 0:
653
+ return
654
+ heads, index = find_pruneable_heads_and_indices(
655
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
656
+ )
657
+
658
+ # Prune linear layers
659
+ self.self.query = prune_linear_layer(self.self.query, index)
660
+ self.self.key = prune_linear_layer(self.self.key, index)
661
+ self.self.value = prune_linear_layer(self.self.value, index)
662
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
663
+
664
+ # Update hyper params and store pruned heads
665
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
666
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
667
+ self.pruned_heads = self.pruned_heads.union(heads)
668
+
669
+ def forward(self, hidden_states, attention_mask=None):
670
+ self_outputs = self.self(hidden_states, attention_mask)
671
+ attention_output = self.output(self_outputs[0], hidden_states)
672
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
673
+ return outputs
674
+
675
+
676
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
677
+ class MraIntermediate(nn.Module):
678
+ def __init__(self, config):
679
+ super().__init__()
680
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
681
+ if isinstance(config.hidden_act, str):
682
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
683
+ else:
684
+ self.intermediate_act_fn = config.hidden_act
685
+
686
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
687
+ hidden_states = self.dense(hidden_states)
688
+ hidden_states = self.intermediate_act_fn(hidden_states)
689
+ return hidden_states
690
+
691
+
692
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
693
+ class MraOutput(nn.Module):
694
+ def __init__(self, config):
695
+ super().__init__()
696
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
697
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
698
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
699
+
700
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
701
+ hidden_states = self.dense(hidden_states)
702
+ hidden_states = self.dropout(hidden_states)
703
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
704
+ return hidden_states
705
+
706
+
707
+ class MraLayer(nn.Module):
708
+ def __init__(self, config):
709
+ super().__init__()
710
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
711
+ self.seq_len_dim = 1
712
+ self.attention = MraAttention(config)
713
+ self.add_cross_attention = config.add_cross_attention
714
+ self.intermediate = MraIntermediate(config)
715
+ self.output = MraOutput(config)
716
+
717
+ def forward(self, hidden_states, attention_mask=None):
718
+ self_attention_outputs = self.attention(hidden_states, attention_mask)
719
+ attention_output = self_attention_outputs[0]
720
+
721
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
722
+
723
+ layer_output = apply_chunking_to_forward(
724
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
725
+ )
726
+ outputs = (layer_output,) + outputs
727
+
728
+ return outputs
729
+
730
+ def feed_forward_chunk(self, attention_output):
731
+ intermediate_output = self.intermediate(attention_output)
732
+ layer_output = self.output(intermediate_output, attention_output)
733
+ return layer_output
734
+
735
+
736
+ class MraEncoder(nn.Module):
737
+ def __init__(self, config):
738
+ super().__init__()
739
+ self.config = config
740
+ self.layer = nn.ModuleList([MraLayer(config) for _ in range(config.num_hidden_layers)])
741
+ self.gradient_checkpointing = False
742
+
743
+ def forward(
744
+ self,
745
+ hidden_states,
746
+ attention_mask=None,
747
+ head_mask=None,
748
+ output_hidden_states=False,
749
+ return_dict=True,
750
+ ):
751
+ all_hidden_states = () if output_hidden_states else None
752
+
753
+ for i, layer_module in enumerate(self.layer):
754
+ if output_hidden_states:
755
+ all_hidden_states = all_hidden_states + (hidden_states,)
756
+
757
+ if self.gradient_checkpointing and self.training:
758
+ layer_outputs = self._gradient_checkpointing_func(
759
+ layer_module.__call__,
760
+ hidden_states,
761
+ attention_mask,
762
+ )
763
+ else:
764
+ layer_outputs = layer_module(hidden_states, attention_mask)
765
+
766
+ hidden_states = layer_outputs[0]
767
+
768
+ if output_hidden_states:
769
+ all_hidden_states = all_hidden_states + (hidden_states,)
770
+
771
+ if not return_dict:
772
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
773
+ return BaseModelOutputWithCrossAttentions(
774
+ last_hidden_state=hidden_states,
775
+ hidden_states=all_hidden_states,
776
+ )
777
+
778
+
779
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform
780
+ class MraPredictionHeadTransform(nn.Module):
781
+ def __init__(self, config):
782
+ super().__init__()
783
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
784
+ if isinstance(config.hidden_act, str):
785
+ self.transform_act_fn = ACT2FN[config.hidden_act]
786
+ else:
787
+ self.transform_act_fn = config.hidden_act
788
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
789
+
790
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
791
+ hidden_states = self.dense(hidden_states)
792
+ hidden_states = self.transform_act_fn(hidden_states)
793
+ hidden_states = self.LayerNorm(hidden_states)
794
+ return hidden_states
795
+
796
+
797
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Mra
798
+ class MraLMPredictionHead(nn.Module):
799
+ def __init__(self, config):
800
+ super().__init__()
801
+ self.transform = MraPredictionHeadTransform(config)
802
+
803
+ # The output weights are the same as the input embeddings, but there is
804
+ # an output-only bias for each token.
805
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
806
+
807
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
808
+
809
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
810
+ self.decoder.bias = self.bias
811
+
812
+ def forward(self, hidden_states):
813
+ hidden_states = self.transform(hidden_states)
814
+ hidden_states = self.decoder(hidden_states)
815
+ return hidden_states
816
+
817
+
818
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Mra
819
+ class MraOnlyMLMHead(nn.Module):
820
+ def __init__(self, config):
821
+ super().__init__()
822
+ self.predictions = MraLMPredictionHead(config)
823
+
824
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
825
+ prediction_scores = self.predictions(sequence_output)
826
+ return prediction_scores
827
+
828
+
829
+ # Copied from transformers.models.yoso.modeling_yoso.YosoPreTrainedModel with Yoso->Mra,yoso->mra
830
+ class MraPreTrainedModel(PreTrainedModel):
831
+ """
832
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
833
+ models.
834
+ """
835
+
836
+ config_class = MraConfig
837
+ base_model_prefix = "mra"
838
+ supports_gradient_checkpointing = True
839
+
840
+ def _init_weights(self, module):
841
+ """Initialize the weights"""
842
+ if isinstance(module, nn.Linear):
843
+ # Slightly different from the TF version which uses truncated_normal for initialization
844
+ # cf https://github.com/pytorch/pytorch/pull/5617
845
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
846
+ if module.bias is not None:
847
+ module.bias.data.zero_()
848
+ elif isinstance(module, nn.Embedding):
849
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
850
+ if module.padding_idx is not None:
851
+ module.weight.data[module.padding_idx].zero_()
852
+ elif isinstance(module, nn.LayerNorm):
853
+ module.bias.data.zero_()
854
+ module.weight.data.fill_(1.0)
855
+
856
+
857
+ MRA_START_DOCSTRING = r"""
858
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
859
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
860
+ behavior.
861
+
862
+ Parameters:
863
+ config ([`MraConfig`]): Model configuration class with all the parameters of the model.
864
+ Initializing with a config file does not load the weights associated with the model, only the
865
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
866
+ """
867
+
868
+ MRA_INPUTS_DOCSTRING = r"""
869
+ Args:
870
+ input_ids (`torch.LongTensor` of shape `({0})`):
871
+ Indices of input sequence tokens in the vocabulary.
872
+
873
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
874
+ [`PreTrainedTokenizer.__call__`] for details.
875
+
876
+ [What are input IDs?](../glossary#input-ids)
877
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
878
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
879
+
880
+ - 1 for tokens that are **not masked**,
881
+ - 0 for tokens that are **masked**.
882
+
883
+ [What are attention masks?](../glossary#attention-mask)
884
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
885
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
886
+ 1]`:
887
+
888
+ - 0 corresponds to a *sentence A* token,
889
+ - 1 corresponds to a *sentence B* token.
890
+
891
+ [What are token type IDs?](../glossary#token-type-ids)
892
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
893
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
894
+ config.max_position_embeddings - 1]`.
895
+
896
+ [What are position IDs?](../glossary#position-ids)
897
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
898
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
899
+
900
+ - 1 indicates the head is **not masked**,
901
+ - 0 indicates the head is **masked**.
902
+
903
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
904
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
905
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
906
+ model's internal embedding lookup matrix.
907
+ output_hidden_states (`bool`, *optional*):
908
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
909
+ more detail.
910
+ return_dict (`bool`, *optional*):
911
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
912
+ """
913
+
914
+
915
+ @add_start_docstrings(
916
+ "The bare MRA Model transformer outputting raw hidden-states without any specific head on top.",
917
+ MRA_START_DOCSTRING,
918
+ )
919
+ class MraModel(MraPreTrainedModel):
920
+ def __init__(self, config):
921
+ super().__init__(config)
922
+ self.config = config
923
+
924
+ self.embeddings = MraEmbeddings(config)
925
+ self.encoder = MraEncoder(config)
926
+
927
+ # Initialize weights and apply final processing
928
+ self.post_init()
929
+
930
+ def get_input_embeddings(self):
931
+ return self.embeddings.word_embeddings
932
+
933
+ def set_input_embeddings(self, value):
934
+ self.embeddings.word_embeddings = value
935
+
936
+ def _prune_heads(self, heads_to_prune):
937
+ """
938
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
939
+ class PreTrainedModel
940
+ """
941
+ for layer, heads in heads_to_prune.items():
942
+ self.encoder.layer[layer].attention.prune_heads(heads)
943
+
944
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
945
+ @add_code_sample_docstrings(
946
+ checkpoint=_CHECKPOINT_FOR_DOC,
947
+ output_type=BaseModelOutputWithCrossAttentions,
948
+ config_class=_CONFIG_FOR_DOC,
949
+ )
950
+ def forward(
951
+ self,
952
+ input_ids: Optional[torch.Tensor] = None,
953
+ attention_mask: Optional[torch.Tensor] = None,
954
+ token_type_ids: Optional[torch.Tensor] = None,
955
+ position_ids: Optional[torch.Tensor] = None,
956
+ head_mask: Optional[torch.Tensor] = None,
957
+ inputs_embeds: Optional[torch.Tensor] = None,
958
+ output_hidden_states: Optional[bool] = None,
959
+ return_dict: Optional[bool] = None,
960
+ ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
961
+ output_hidden_states = (
962
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
963
+ )
964
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
965
+
966
+ if input_ids is not None and inputs_embeds is not None:
967
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
968
+ elif input_ids is not None:
969
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
970
+ input_shape = input_ids.size()
971
+ elif inputs_embeds is not None:
972
+ input_shape = inputs_embeds.size()[:-1]
973
+ else:
974
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
975
+
976
+ batch_size, seq_length = input_shape
977
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
978
+
979
+ if attention_mask is None:
980
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
981
+
982
+ if token_type_ids is None:
983
+ if hasattr(self.embeddings, "token_type_ids"):
984
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
985
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
986
+ token_type_ids = buffered_token_type_ids_expanded
987
+ else:
988
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
989
+
990
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
991
+ # ourselves in which case we just need to make it broadcastable to all heads.
992
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
993
+
994
+ # Prepare head mask if needed
995
+ # 1.0 in head_mask indicate we keep the head
996
+ # attention_probs has shape bsz x n_heads x N x N
997
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
998
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
999
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1000
+
1001
+ embedding_output = self.embeddings(
1002
+ input_ids=input_ids,
1003
+ position_ids=position_ids,
1004
+ token_type_ids=token_type_ids,
1005
+ inputs_embeds=inputs_embeds,
1006
+ )
1007
+ encoder_outputs = self.encoder(
1008
+ embedding_output,
1009
+ attention_mask=extended_attention_mask,
1010
+ head_mask=head_mask,
1011
+ output_hidden_states=output_hidden_states,
1012
+ return_dict=return_dict,
1013
+ )
1014
+ sequence_output = encoder_outputs[0]
1015
+
1016
+ if not return_dict:
1017
+ return (sequence_output,) + encoder_outputs[1:]
1018
+
1019
+ return BaseModelOutputWithCrossAttentions(
1020
+ last_hidden_state=sequence_output,
1021
+ hidden_states=encoder_outputs.hidden_states,
1022
+ attentions=encoder_outputs.attentions,
1023
+ cross_attentions=encoder_outputs.cross_attentions,
1024
+ )
1025
+
1026
+
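Not part of the upstream file: a hedged sketch of what `get_extended_attention_mask` (inherited from `ModuleUtilsMixin`) turns a padding mask into before the encoder above consumes it.

```python
import torch
from transformers import MraConfig, MraModel

model = MraModel(MraConfig(num_hidden_layers=1, hidden_size=64, num_attention_heads=2, intermediate_size=128))
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
extended = model.get_extended_attention_mask(attention_mask, attention_mask.shape)
# extended has shape (1, 1, 1, 4): broadcastable over heads and query positions,
# with 0.0 for real tokens and a large negative value for padded ones
```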
1027
+ @add_start_docstrings("""MRA Model with a `language modeling` head on top.""", MRA_START_DOCSTRING)
1028
+ class MraForMaskedLM(MraPreTrainedModel):
1029
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
1030
+
1031
+ def __init__(self, config):
1032
+ super().__init__(config)
1033
+
1034
+ self.mra = MraModel(config)
1035
+ self.cls = MraOnlyMLMHead(config)
1036
+
1037
+ # Initialize weights and apply final processing
1038
+ self.post_init()
1039
+
1040
+ def get_output_embeddings(self):
1041
+ return self.cls.predictions.decoder
1042
+
1043
+ def set_output_embeddings(self, new_embeddings):
1044
+ self.cls.predictions.decoder = new_embeddings
1045
+
1046
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1047
+ @add_code_sample_docstrings(
1048
+ checkpoint=_CHECKPOINT_FOR_DOC,
1049
+ output_type=MaskedLMOutput,
1050
+ config_class=_CONFIG_FOR_DOC,
1051
+ )
1052
+ def forward(
1053
+ self,
1054
+ input_ids: Optional[torch.Tensor] = None,
1055
+ attention_mask: Optional[torch.Tensor] = None,
1056
+ token_type_ids: Optional[torch.Tensor] = None,
1057
+ position_ids: Optional[torch.Tensor] = None,
1058
+ head_mask: Optional[torch.Tensor] = None,
1059
+ inputs_embeds: Optional[torch.Tensor] = None,
1060
+ labels: Optional[torch.Tensor] = None,
1061
+ output_hidden_states: Optional[bool] = None,
1062
+ return_dict: Optional[bool] = None,
1063
+ ) -> Union[Tuple, MaskedLMOutput]:
1064
+ r"""
1065
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1066
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1067
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1068
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1069
+ """
1070
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1071
+
1072
+ outputs = self.mra(
1073
+ input_ids,
1074
+ attention_mask=attention_mask,
1075
+ token_type_ids=token_type_ids,
1076
+ position_ids=position_ids,
1077
+ head_mask=head_mask,
1078
+ inputs_embeds=inputs_embeds,
1079
+ output_hidden_states=output_hidden_states,
1080
+ return_dict=return_dict,
1081
+ )
1082
+
1083
+ sequence_output = outputs[0]
1084
+ prediction_scores = self.cls(sequence_output)
1085
+
1086
+ masked_lm_loss = None
1087
+ if labels is not None:
1088
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1089
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1090
+
1091
+ if not return_dict:
1092
+ output = (prediction_scores,) + outputs[1:]
1093
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1094
+
1095
+ return MaskedLMOutput(
1096
+ loss=masked_lm_loss,
1097
+ logits=prediction_scores,
1098
+ hidden_states=outputs.hidden_states,
1099
+ attentions=outputs.attentions,
1100
+ )
1101
+
1102
+
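Not part of the upstream file: a standalone sketch of the `-100` labelling convention the masked-LM loss above relies on; the mask token id and masking rate here are illustrative assumptions.

```python
import torch

input_ids = torch.randint(5, 1000, (2, 16))
labels = input_ids.clone()

masked_indices = torch.bernoulli(torch.full(labels.shape, 0.15)).bool()
labels[~masked_indices] = -100   # CrossEntropyLoss ignores these positions
input_ids[masked_indices] = 4    # hypothetical mask_token_id; use tokenizer.mask_token_id in practice
```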
1103
+ # Copied from transformers.models.yoso.modeling_yoso.YosoClassificationHead with Yoso->Mra
1104
+ class MraClassificationHead(nn.Module):
1105
+ """Head for sentence-level classification tasks."""
1106
+
1107
+ def __init__(self, config):
1108
+ super().__init__()
1109
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1110
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1111
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1112
+
1113
+ self.config = config
1114
+
1115
+ def forward(self, features, **kwargs):
1116
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1117
+ x = self.dropout(x)
1118
+ x = self.dense(x)
1119
+ x = ACT2FN[self.config.hidden_act](x)
1120
+ x = self.dropout(x)
1121
+ x = self.out_proj(x)
1122
+ return x
1123
+
1124
+
1125
+ @add_start_docstrings(
1126
+ """MRA Model transformer with a sequence classification/regression head on top (a linear layer on top of
1127
+ the pooled output) e.g. for GLUE tasks.""",
1128
+ MRA_START_DOCSTRING,
1129
+ )
1130
+ class MraForSequenceClassification(MraPreTrainedModel):
1131
+ def __init__(self, config):
1132
+ super().__init__(config)
1133
+ self.num_labels = config.num_labels
1134
+ self.mra = MraModel(config)
1135
+ self.classifier = MraClassificationHead(config)
1136
+
1137
+ # Initialize weights and apply final processing
1138
+ self.post_init()
1139
+
1140
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1141
+ @add_code_sample_docstrings(
1142
+ checkpoint=_CHECKPOINT_FOR_DOC,
1143
+ output_type=SequenceClassifierOutput,
1144
+ config_class=_CONFIG_FOR_DOC,
1145
+ )
1146
+ def forward(
1147
+ self,
1148
+ input_ids: Optional[torch.Tensor] = None,
1149
+ attention_mask: Optional[torch.Tensor] = None,
1150
+ token_type_ids: Optional[torch.Tensor] = None,
1151
+ position_ids: Optional[torch.Tensor] = None,
1152
+ head_mask: Optional[torch.Tensor] = None,
1153
+ inputs_embeds: Optional[torch.Tensor] = None,
1154
+ labels: Optional[torch.Tensor] = None,
1155
+ output_hidden_states: Optional[bool] = None,
1156
+ return_dict: Optional[bool] = None,
1157
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1158
+ r"""
1159
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1160
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1161
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
1162
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1163
+ """
1164
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1165
+
1166
+ outputs = self.mra(
1167
+ input_ids,
1168
+ attention_mask=attention_mask,
1169
+ token_type_ids=token_type_ids,
1170
+ position_ids=position_ids,
1171
+ head_mask=head_mask,
1172
+ inputs_embeds=inputs_embeds,
1173
+ output_hidden_states=output_hidden_states,
1174
+ return_dict=return_dict,
1175
+ )
1176
+
1177
+ sequence_output = outputs[0]
1178
+ logits = self.classifier(sequence_output)
1179
+
1180
+ loss = None
1181
+ if labels is not None:
1182
+ if self.config.problem_type is None:
1183
+ if self.num_labels == 1:
1184
+ self.config.problem_type = "regression"
1185
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1186
+ self.config.problem_type = "single_label_classification"
1187
+ else:
1188
+ self.config.problem_type = "multi_label_classification"
1189
+
1190
+ if self.config.problem_type == "regression":
1191
+ loss_fct = MSELoss()
1192
+ if self.num_labels == 1:
1193
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1194
+ else:
1195
+ loss = loss_fct(logits, labels)
1196
+ elif self.config.problem_type == "single_label_classification":
1197
+ loss_fct = CrossEntropyLoss()
1198
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1199
+ elif self.config.problem_type == "multi_label_classification":
1200
+ loss_fct = BCEWithLogitsLoss()
1201
+ loss = loss_fct(logits, labels)
1202
+ if not return_dict:
1203
+ output = (logits,) + outputs[1:]
1204
+ return ((loss,) + output) if loss is not None else output
1205
+
1206
+ return SequenceClassifierOutput(
1207
+ loss=loss,
1208
+ logits=logits,
1209
+ hidden_states=outputs.hidden_states,
1210
+ attentions=outputs.attentions,
1211
+ )
1212
+
1213
+
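Not part of the upstream file: the `problem_type` inference above in one place, plus a small multi-label example (shapes are illustrative).

```python
import torch
from torch.nn import BCEWithLogitsLoss

# num_labels == 1                         -> "regression"                  (MSELoss)
# num_labels > 1 with integer labels      -> "single_label_classification" (CrossEntropyLoss)
# anything else (float, multi-hot labels) -> "multi_label_classification"  (BCEWithLogitsLoss)
logits = torch.randn(2, 3)
multi_hot_labels = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
loss = BCEWithLogitsLoss()(logits, multi_hot_labels)
```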
1214
+ @add_start_docstrings(
1215
+ """MRA Model with a multiple choice classification head on top (a linear layer on top of
1216
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
1217
+ MRA_START_DOCSTRING,
1218
+ )
1219
+ class MraForMultipleChoice(MraPreTrainedModel):
1220
+ def __init__(self, config):
1221
+ super().__init__(config)
1222
+
1223
+ self.mra = MraModel(config)
1224
+ self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size)
1225
+ self.classifier = nn.Linear(config.hidden_size, 1)
1226
+
1227
+ # Initialize weights and apply final processing
1228
+ self.post_init()
1229
+
1230
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1231
+ @add_code_sample_docstrings(
1232
+ checkpoint=_CHECKPOINT_FOR_DOC,
1233
+ output_type=MultipleChoiceModelOutput,
1234
+ config_class=_CONFIG_FOR_DOC,
1235
+ )
1236
+ def forward(
1237
+ self,
1238
+ input_ids: Optional[torch.Tensor] = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ token_type_ids: Optional[torch.Tensor] = None,
1241
+ position_ids: Optional[torch.Tensor] = None,
1242
+ head_mask: Optional[torch.Tensor] = None,
1243
+ inputs_embeds: Optional[torch.Tensor] = None,
1244
+ labels: Optional[torch.Tensor] = None,
1245
+ output_hidden_states: Optional[bool] = None,
1246
+ return_dict: Optional[bool] = None,
1247
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
1248
+ r"""
1249
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1250
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1251
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1252
+ `input_ids` above)
1253
+ """
1254
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1255
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1256
+
1257
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1258
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1259
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1260
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1261
+ inputs_embeds = (
1262
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1263
+ if inputs_embeds is not None
1264
+ else None
1265
+ )
1266
+
1267
+ outputs = self.mra(
1268
+ input_ids,
1269
+ attention_mask=attention_mask,
1270
+ token_type_ids=token_type_ids,
1271
+ position_ids=position_ids,
1272
+ head_mask=head_mask,
1273
+ inputs_embeds=inputs_embeds,
1274
+ output_hidden_states=output_hidden_states,
1275
+ return_dict=return_dict,
1276
+ )
1277
+
1278
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
1279
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
1280
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
1281
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
1282
+ logits = self.classifier(pooled_output)
1283
+
1284
+ reshaped_logits = logits.view(-1, num_choices)
1285
+
1286
+ loss = None
1287
+ if labels is not None:
1288
+ loss_fct = CrossEntropyLoss()
1289
+ loss = loss_fct(reshaped_logits, labels)
1290
+
1291
+ if not return_dict:
1292
+ output = (reshaped_logits,) + outputs[1:]
1293
+ return ((loss,) + output) if loss is not None else output
1294
+
1295
+ return MultipleChoiceModelOutput(
1296
+ loss=loss,
1297
+ logits=reshaped_logits,
1298
+ hidden_states=outputs.hidden_states,
1299
+ attentions=outputs.attentions,
1300
+ )
1301
+
1302
+
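Not part of the upstream file: a sketch of the reshape round-trip the multiple-choice head performs; the sizes are illustrative.

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 1000, (batch_size, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 16): what the encoder actually sees
logits = torch.randn(flat_input_ids.size(0), 1)          # one score per (example, choice) pair
reshaped_logits = logits.view(-1, num_choices)           # (2, 4): compared against labels in [0, num_choices)
```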
1303
+ @add_start_docstrings(
1304
+ """MRA Model with a token classification head on top (a linear layer on top of
1305
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
1306
+ MRA_START_DOCSTRING,
1307
+ )
1308
+ class MraForTokenClassification(MraPreTrainedModel):
1309
+ def __init__(self, config):
1310
+ super().__init__(config)
1311
+ self.num_labels = config.num_labels
1312
+
1313
+ self.mra = MraModel(config)
1314
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1315
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1316
+
1317
+ # Initialize weights and apply final processing
1318
+ self.post_init()
1319
+
1320
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1321
+ @add_code_sample_docstrings(
1322
+ checkpoint=_CHECKPOINT_FOR_DOC,
1323
+ output_type=TokenClassifierOutput,
1324
+ config_class=_CONFIG_FOR_DOC,
1325
+ )
1326
+ def forward(
1327
+ self,
1328
+ input_ids: Optional[torch.Tensor] = None,
1329
+ attention_mask: Optional[torch.Tensor] = None,
1330
+ token_type_ids: Optional[torch.Tensor] = None,
1331
+ position_ids: Optional[torch.Tensor] = None,
1332
+ head_mask: Optional[torch.Tensor] = None,
1333
+ inputs_embeds: Optional[torch.Tensor] = None,
1334
+ labels: Optional[torch.Tensor] = None,
1335
+ output_hidden_states: Optional[bool] = None,
1336
+ return_dict: Optional[bool] = None,
1337
+ ) -> Union[Tuple, TokenClassifierOutput]:
1338
+ r"""
1339
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1340
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1341
+ """
1342
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1343
+
1344
+ outputs = self.mra(
1345
+ input_ids,
1346
+ attention_mask=attention_mask,
1347
+ token_type_ids=token_type_ids,
1348
+ position_ids=position_ids,
1349
+ head_mask=head_mask,
1350
+ inputs_embeds=inputs_embeds,
1351
+ output_hidden_states=output_hidden_states,
1352
+ return_dict=return_dict,
1353
+ )
1354
+
1355
+ sequence_output = outputs[0]
1356
+
1357
+ sequence_output = self.dropout(sequence_output)
1358
+ logits = self.classifier(sequence_output)
1359
+
1360
+ loss = None
1361
+ if labels is not None:
1362
+ loss_fct = CrossEntropyLoss()
1363
+ # Only keep active parts of the loss
1364
+ if attention_mask is not None:
1365
+ active_loss = attention_mask.view(-1) == 1
1366
+ active_logits = logits.view(-1, self.num_labels)
1367
+ active_labels = torch.where(
1368
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1369
+ )
1370
+ loss = loss_fct(active_logits, active_labels)
1371
+ else:
1372
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1373
+
1374
+ if not return_dict:
1375
+ output = (logits,) + outputs[1:]
1376
+ return ((loss,) + output) if loss is not None else output
1377
+
1378
+ return TokenClassifierOutput(
1379
+ loss=loss,
1380
+ logits=logits,
1381
+ hidden_states=outputs.hidden_states,
1382
+ attentions=outputs.attentions,
1383
+ )
1384
+
1385
+
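Not part of the upstream file: the padding-aware token classification loss above as a standalone snippet, with illustrative tensor sizes.

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 5
logits = torch.randn(1, 4, num_labels)         # (batch, seq_len, num_labels)
labels = torch.tensor([[2, 0, 3, 1]])
attention_mask = torch.tensor([[1, 1, 0, 0]])  # last two tokens are padding

loss_fct = CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_labels = torch.where(active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index))
loss = loss_fct(logits.view(-1, num_labels), active_labels)  # padded positions become -100 and are skipped
```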
1386
+ @add_start_docstrings(
1387
+ """MRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1388
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
1389
+ MRA_START_DOCSTRING,
1390
+ )
1391
+ class MraForQuestionAnswering(MraPreTrainedModel):
1392
+ def __init__(self, config):
1393
+ super().__init__(config)
1394
+
1395
+ config.num_labels = 2
1396
+ self.num_labels = config.num_labels
1397
+
1398
+ self.mra = MraModel(config)
1399
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1400
+
1401
+ # Initialize weights and apply final processing
1402
+ self.post_init()
1403
+
1404
+ @add_start_docstrings_to_model_forward(MRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1405
+ @add_code_sample_docstrings(
1406
+ checkpoint=_CHECKPOINT_FOR_DOC,
1407
+ output_type=QuestionAnsweringModelOutput,
1408
+ config_class=_CONFIG_FOR_DOC,
1409
+ )
1410
+ def forward(
1411
+ self,
1412
+ input_ids: Optional[torch.Tensor] = None,
1413
+ attention_mask: Optional[torch.Tensor] = None,
1414
+ token_type_ids: Optional[torch.Tensor] = None,
1415
+ position_ids: Optional[torch.Tensor] = None,
1416
+ head_mask: Optional[torch.Tensor] = None,
1417
+ inputs_embeds: Optional[torch.Tensor] = None,
1418
+ start_positions: Optional[torch.Tensor] = None,
1419
+ end_positions: Optional[torch.Tensor] = None,
1420
+ output_hidden_states: Optional[bool] = None,
1421
+ return_dict: Optional[bool] = None,
1422
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1423
+ r"""
1424
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1425
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1426
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1427
+ are not taken into account for computing the loss.
1428
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1429
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1430
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1431
+ are not taken into account for computing the loss.
1432
+ """
1433
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1434
+
1435
+ outputs = self.mra(
1436
+ input_ids,
1437
+ attention_mask=attention_mask,
1438
+ token_type_ids=token_type_ids,
1439
+ position_ids=position_ids,
1440
+ head_mask=head_mask,
1441
+ inputs_embeds=inputs_embeds,
1442
+ output_hidden_states=output_hidden_states,
1443
+ return_dict=return_dict,
1444
+ )
1445
+
1446
+ sequence_output = outputs[0]
1447
+
1448
+ logits = self.qa_outputs(sequence_output)
1449
+ start_logits, end_logits = logits.split(1, dim=-1)
1450
+ start_logits = start_logits.squeeze(-1)
1451
+ end_logits = end_logits.squeeze(-1)
1452
+
1453
+ total_loss = None
1454
+ if start_positions is not None and end_positions is not None:
1455
+ # If we are on multi-GPU, split add a dimension
1456
+ if len(start_positions.size()) > 1:
1457
+ start_positions = start_positions.squeeze(-1)
1458
+ if len(end_positions.size()) > 1:
1459
+ end_positions = end_positions.squeeze(-1)
1460
+ # sometimes the start/end positions are outside our model inputs; we ignore these terms
1461
+ ignored_index = start_logits.size(1)
1462
+ start_positions = start_positions.clamp(0, ignored_index)
1463
+ end_positions = end_positions.clamp(0, ignored_index)
1464
+
1465
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1466
+ start_loss = loss_fct(start_logits, start_positions)
1467
+ end_loss = loss_fct(end_logits, end_positions)
1468
+ total_loss = (start_loss + end_loss) / 2
1469
+
1470
+ if not return_dict:
1471
+ output = (start_logits, end_logits) + outputs[1:]
1472
+ return ((total_loss,) + output) if total_loss is not None else output
1473
+
1474
+ return QuestionAnsweringModelOutput(
1475
+ loss=total_loss,
1476
+ start_logits=start_logits,
1477
+ end_logits=end_logits,
1478
+ hidden_states=outputs.hidden_states,
1479
+ attentions=outputs.attentions,
1480
+ )
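Not part of the upstream file: a standalone sketch of the span loss computed in `MraForQuestionAnswering.forward` above, showing why out-of-range positions are first clamped and then ignored.

```python
import torch
from torch.nn import CrossEntropyLoss

start_logits = torch.randn(2, 10)        # (batch_size, seq_len)
end_logits = torch.randn(2, 10)
start_positions = torch.tensor([3, 12])  # 12 falls outside the 10-token window on purpose
end_positions = torch.tensor([5, 15])

ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)  # out-of-range targets -> ignored_index
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)    # those targets then contribute no loss
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
```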
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
21
+ }
22
+
23
+ try:
24
+ if not is_torch_available():
25
+ raise OptionalDependencyNotAvailable()
26
+ except OptionalDependencyNotAvailable:
27
+ pass
28
+ else:
29
+ _import_structure["modeling_nezha"] = [
30
+ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
31
+ "NezhaForNextSentencePrediction",
32
+ "NezhaForMaskedLM",
33
+ "NezhaForPreTraining",
34
+ "NezhaForMultipleChoice",
35
+ "NezhaForQuestionAnswering",
36
+ "NezhaForSequenceClassification",
37
+ "NezhaForTokenClassification",
38
+ "NezhaModel",
39
+ "NezhaPreTrainedModel",
40
+ ]
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
45
+
46
+ try:
47
+ if not is_torch_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .modeling_nezha import (
53
+ NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
54
+ NezhaForMaskedLM,
55
+ NezhaForMultipleChoice,
56
+ NezhaForNextSentencePrediction,
57
+ NezhaForPreTraining,
58
+ NezhaForQuestionAnswering,
59
+ NezhaForSequenceClassification,
60
+ NezhaForTokenClassification,
61
+ NezhaModel,
62
+ NezhaPreTrainedModel,
63
+ )
64
+
65
+
66
+ else:
67
+ import sys
68
+
69
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
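Not part of the upstream file: a hedged sketch of what the `_LazyModule` wiring above buys — the configuration is importable cheaply, while the torch-dependent modeling classes are only imported on first access.

```python
from transformers.models.nezha import NezhaConfig  # resolved from _import_structure; modeling code not loaded yet

config = NezhaConfig(num_hidden_layers=2)

from transformers.models.nezha import NezhaModel   # first access triggers the lazy import of modeling_nezha
model = NezhaModel(config)
```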
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc ADDED
Binary file (4.76 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc ADDED
Binary file (49.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py ADDED
@@ -0,0 +1,103 @@
1
+ from ... import PretrainedConfig
2
+ from ..deprecated._archive_maps import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
3
+
4
+
5
+ class NezhaConfig(PretrainedConfig):
6
+ r"""
7
+ This is the configuration class to store the configuration of an [`NezhaModel`]. It is used to instantiate an Nezha
8
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
9
+ defaults will yield a similar configuration to that of the Nezha
10
+ [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture.
11
+
12
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
13
+ documentation from [`PretrainedConfig`] for more information.
14
+
15
+
16
+ Args:
17
+ vocab_size (`int`, optional, defaults to 21128):
18
+ Vocabulary size of the NEZHA model. Defines the different tokens that can be represented by the
19
+ *inputs_ids* passed to the forward method of [`NezhaModel`].
20
+ hidden_size (`int`, optional, defaults to 768):
21
+ Dimensionality of the encoder layers and the pooler layer.
22
+ num_hidden_layers (`int`, optional, defaults to 12):
23
+ Number of hidden layers in the Transformer encoder.
24
+ num_attention_heads (`int`, optional, defaults to 12):
25
+ Number of attention heads for each attention layer in the Transformer encoder.
26
+ intermediate_size (`int`, optional, defaults to 3072):
27
+ The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
28
+ hidden_act (`str` or `function`, optional, defaults to "gelu"):
29
+ The non-linear activation function (function or string) in the encoder and pooler.
30
+ hidden_dropout_prob (`float`, optional, defaults to 0.1):
31
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
32
+ attention_probs_dropout_prob (`float`, optional, defaults to 0.1):
33
+ The dropout ratio for the attention probabilities.
34
+ max_position_embeddings (`int`, optional, defaults to 512):
35
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
36
+ (e.g., 512 or 1024 or 2048).
37
+ type_vocab_size (`int`, optional, defaults to 2):
38
+ The vocabulary size of the *token_type_ids* passed into [`NezhaModel`].
39
+ initializer_range (`float`, optional, defaults to 0.02):
40
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
41
+ layer_norm_eps (`float`, optional, defaults to 1e-12):
42
+ The epsilon used by the layer normalization layers.
43
+ classifier_dropout (`float`, optional, defaults to 0.1):
44
+ The dropout ratio for attached classifiers.
45
+ is_decoder (`bool`, *optional*, defaults to `False`):
46
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
47
+
48
+ Example:
49
+
50
+ ```python
51
+ >>> from transformers import NezhaConfig, NezhaModel
52
+
53
+ >>> # Initializing an Nezha configuration
54
+ >>> configuration = NezhaConfig()
55
+
56
+ >>> # Initializing a model (with random weights) from the Nezha-base style configuration model
57
+ >>> model = NezhaModel(configuration)
58
+
59
+ >>> # Accessing the model configuration
60
+ >>> configuration = model.config
61
+ ```"""
62
+
63
+ model_type = "nezha"
64
+
65
+ def __init__(
66
+ self,
67
+ vocab_size=21128,
68
+ hidden_size=768,
69
+ num_hidden_layers=12,
70
+ num_attention_heads=12,
71
+ intermediate_size=3072,
72
+ hidden_act="gelu",
73
+ hidden_dropout_prob=0.1,
74
+ attention_probs_dropout_prob=0.1,
75
+ max_position_embeddings=512,
76
+ max_relative_position=64,
77
+ type_vocab_size=2,
78
+ initializer_range=0.02,
79
+ layer_norm_eps=1e-12,
80
+ classifier_dropout=0.1,
81
+ pad_token_id=0,
82
+ bos_token_id=2,
83
+ eos_token_id=3,
84
+ use_cache=True,
85
+ **kwargs,
86
+ ):
87
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
88
+
89
+ self.vocab_size = vocab_size
90
+ self.hidden_size = hidden_size
91
+ self.num_hidden_layers = num_hidden_layers
92
+ self.num_attention_heads = num_attention_heads
93
+ self.hidden_act = hidden_act
94
+ self.intermediate_size = intermediate_size
95
+ self.hidden_dropout_prob = hidden_dropout_prob
96
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
97
+ self.max_position_embeddings = max_position_embeddings
98
+ self.max_relative_position = max_relative_position
99
+ self.type_vocab_size = type_vocab_size
100
+ self.initializer_range = initializer_range
101
+ self.layer_norm_eps = layer_norm_eps
102
+ self.classifier_dropout = classifier_dropout
103
+ self.use_cache = use_cache
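Not part of the upstream file: a short sketch of the Nezha-specific knob stored above — `max_relative_position` bounds the relative distances used by the functional relative position encoding in `modeling_nezha.py`.

```python
from transformers import NezhaConfig

config = NezhaConfig(max_position_embeddings=128, max_relative_position=32)
# relative distances beyond ±32 are clipped, so the relative-position table has 2 * 32 + 1 = 65 rows
```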
llmeval-env/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py ADDED
@@ -0,0 +1,1693 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Nezha model."""
16
+
17
+
18
+ import math
19
+ import os
20
+ import warnings
21
+ from dataclasses import dataclass
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from ...activations import ACT2FN
30
+ from ...modeling_outputs import (
31
+ BaseModelOutputWithPastAndCrossAttentions,
32
+ BaseModelOutputWithPoolingAndCrossAttentions,
33
+ MaskedLMOutput,
34
+ MultipleChoiceModelOutput,
35
+ NextSentencePredictorOutput,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutput,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_utils import PreTrainedModel
41
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
42
+ from ...utils import (
43
+ ModelOutput,
44
+ add_code_sample_docstrings,
45
+ add_start_docstrings,
46
+ add_start_docstrings_to_model_forward,
47
+ logging,
48
+ replace_return_docstrings,
49
+ )
50
+ from .configuration_nezha import NezhaConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "sijunhe/nezha-cn-base"
56
+ _CONFIG_FOR_DOC = "NezhaConfig"
57
+
58
+
59
+ from ..deprecated._archive_maps import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
60
+
61
+
62
+ def load_tf_weights_in_nezha(model, config, tf_checkpoint_path):
63
+ """Load tf checkpoints in a pytorch model."""
64
+ try:
65
+ import re
66
+
67
+ import numpy as np
68
+ import tensorflow as tf
69
+ except ImportError:
70
+ logger.error(
71
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
72
+ "https://www.tensorflow.org/install/ for installation instructions."
73
+ )
74
+ raise
75
+ tf_path = os.path.abspath(tf_checkpoint_path)
76
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
77
+ # Load weights from TF model
78
+ init_vars = tf.train.list_variables(tf_path)
79
+ names = []
80
+ arrays = []
81
+ for name, shape in init_vars:
82
+ logger.info(f"Loading TF weight {name} with shape {shape}")
83
+ array = tf.train.load_variable(tf_path, name)
84
+ names.append(name)
85
+ arrays.append(array)
86
+
87
+ for name, array in zip(names, arrays):
88
+ name = name.split("/")
89
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
90
+ # which are not required for using the pretrained model
91
+ if any(
92
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
93
+ for n in name
94
+ ):
95
+ logger.info(f"Skipping {'/'.join(name)}")
96
+ continue
97
+ pointer = model
98
+ for m_name in name:
99
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
100
+ scope_names = re.split(r"_(\d+)", m_name)
101
+ else:
102
+ scope_names = [m_name]
103
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
104
+ pointer = getattr(pointer, "weight")
105
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
106
+ pointer = getattr(pointer, "bias")
107
+ elif scope_names[0] == "output_weights":
108
+ pointer = getattr(pointer, "weight")
109
+ elif scope_names[0] == "squad":
110
+ pointer = getattr(pointer, "classifier")
111
+ else:
112
+ try:
113
+ pointer = getattr(pointer, scope_names[0])
114
+ except AttributeError:
115
+ logger.info(f"Skipping {'/'.join(name)}")
116
+ continue
117
+ if len(scope_names) >= 2:
118
+ num = int(scope_names[1])
119
+ pointer = pointer[num]
120
+ if m_name[-11:] == "_embeddings":
121
+ pointer = getattr(pointer, "weight")
122
+ elif m_name == "kernel":
123
+ array = np.transpose(array)
124
+ try:
125
+ if pointer.shape != array.shape:
126
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
127
+ except ValueError as e:
128
+ e.args += (pointer.shape, array.shape)
129
+ raise
130
+ logger.info(f"Initialize PyTorch weight {name}")
131
+ pointer.data = torch.from_numpy(array)
132
+ return model
133
+
134
+
135
+ class NezhaRelativePositionsEncoding(nn.Module):
136
+ """Implement the Functional Relative Position Encoding"""
137
+
138
+ def __init__(self, length, depth, max_relative_position=127):
139
+ super().__init__()
140
+ vocab_size = max_relative_position * 2 + 1
141
+ range_vec = torch.arange(length)
142
+ range_mat = range_vec.repeat(length).view(length, length)
143
+ distance_mat = range_mat - torch.t(range_mat)
144
+ distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
145
+ final_mat = distance_mat_clipped + max_relative_position
146
+
147
+ embeddings_table = torch.zeros(vocab_size, depth)
148
+ position = torch.arange(0, vocab_size, dtype=torch.int64).float().unsqueeze(1)
149
+ div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
150
+ embeddings_table[:, 0::2] = torch.sin(position * div_term)
151
+ embeddings_table[:, 1::2] = torch.cos(position * div_term)
152
+
153
+ flat_relative_positions_matrix = final_mat.view(-1)
154
+ one_hot_relative_positions_matrix = torch.nn.functional.one_hot(
155
+ flat_relative_positions_matrix, num_classes=vocab_size
156
+ ).float()
157
+ positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
158
+ my_shape = list(final_mat.size())
159
+ my_shape.append(depth)
160
+ positions_encoding = positions_encoding.view(my_shape)
161
+ self.register_buffer("positions_encoding", positions_encoding, persistent=False)
162
+
163
+ def forward(self, length):
164
+ return self.positions_encoding[:length, :length, :]
165
+
166
+
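Not part of the upstream file: a standalone sketch of the clipped relative-distance matrix built in `__init__` above, using an equivalent broadcast form for a toy length.

```python
import torch

length, max_relative_position = 5, 2
range_vec = torch.arange(length)
distance_mat = range_vec[None, :] - range_vec[:, None]               # distance_mat[i, j] == j - i
clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
final_mat = clipped + max_relative_position                          # indices in [0, 2 * max_relative_position]
# final_mat[i, j] selects the row of the sinusoidal embeddings_table used for the (i, j) pair,
# so positions_encoding ends up with shape (length, length, depth)
```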
167
+ class NezhaEmbeddings(nn.Module):
168
+ """Construct the embeddings from word and token_type embeddings."""
169
+
170
+ def __init__(self, config):
171
+ super().__init__()
172
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
173
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
174
+
175
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
176
+ # any TensorFlow checkpoint file
177
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
178
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
179
+ self.register_buffer(
180
+ "token_type_ids", torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False
181
+ )
182
+
183
+ def forward(
184
+ self,
185
+ input_ids: Optional[torch.LongTensor] = None,
186
+ token_type_ids: Optional[torch.LongTensor] = None,
187
+ inputs_embeds: Optional[torch.FloatTensor] = None,
188
+ ) -> torch.Tensor:
189
+ if input_ids is not None:
190
+ input_shape = input_ids.size()
191
+ else:
192
+ input_shape = inputs_embeds.size()[:-1]
193
+
194
+ seq_length = input_shape[1]
195
+
196
+ if inputs_embeds is None:
197
+ inputs_embeds = self.word_embeddings(input_ids)
198
+
199
+ # Set token_type_ids to the buffer registered in the constructor (all zeros), which usually happens
200
+ # when token_type_ids is auto-generated. The registered buffer lets users trace the model without
201
+ # passing token_type_ids and solves issue #5664.
202
+ if token_type_ids is None:
203
+ if hasattr(self, "token_type_ids"):
204
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
205
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
206
+ token_type_ids = buffered_token_type_ids_expanded
207
+ else:
208
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)
209
+
210
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
211
+
212
+ embeddings = inputs_embeds + token_type_embeddings
213
+ embeddings = self.LayerNorm(embeddings)
214
+ embeddings = self.dropout(embeddings)
215
+ return embeddings
216
+
217
+
218
+ class NezhaSelfAttention(nn.Module):
219
+ def __init__(self, config):
220
+ super().__init__()
221
+ if config.hidden_size % config.num_attention_heads != 0:
222
+ raise ValueError(
223
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
224
+ f"heads ({config.num_attention_heads})"
225
+ )
226
+
227
+ self.num_attention_heads = config.num_attention_heads
228
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
229
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
230
+
231
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
232
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
233
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
234
+
235
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
236
+ self.relative_positions_encoding = NezhaRelativePositionsEncoding(
237
+ length=config.max_position_embeddings,
238
+ depth=self.attention_head_size,
239
+ max_relative_position=config.max_relative_position,
240
+ )
241
+ self.is_decoder = config.is_decoder
242
+
243
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
244
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
245
+ x = x.view(new_x_shape)
246
+ return x.permute(0, 2, 1, 3)
247
+
248
+ def forward(
249
+ self,
250
+ hidden_states: torch.Tensor,
251
+ attention_mask: Optional[torch.FloatTensor] = None,
252
+ head_mask: Optional[torch.FloatTensor] = None,
253
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
254
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
255
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
256
+ output_attentions: Optional[bool] = False,
257
+ ) -> Tuple[torch.Tensor]:
258
+ mixed_query_layer = self.query(hidden_states)
259
+
260
+ # If this is instantiated as a cross-attention module, the keys
261
+ # and values come from an encoder; the attention mask needs to be
262
+ # such that the encoder's padding tokens are not attended to.
263
+ is_cross_attention = encoder_hidden_states is not None
264
+
265
+ if is_cross_attention and past_key_value is not None:
266
+ # reuse k,v, cross_attentions
267
+ key_layer = past_key_value[0]
268
+ value_layer = past_key_value[1]
269
+ attention_mask = encoder_attention_mask
270
+ elif is_cross_attention:
271
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
272
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
273
+ attention_mask = encoder_attention_mask
274
+ elif past_key_value is not None:
275
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
276
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
277
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
278
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
279
+ else:
280
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
281
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
282
+
283
+ query_layer = self.transpose_for_scores(mixed_query_layer)
284
+
285
+ if self.is_decoder:
286
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
287
+ # Further calls to cross_attention layer can then reuse all cross-attention
288
+ # key/value_states (first "if" case)
289
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
290
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
291
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
292
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
293
+ past_key_value = (key_layer, value_layer)
294
+
295
+ # Take the dot product between "query" and "key" to get the raw attention scores.
296
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
297
+
298
+ batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
299
+ relations_keys = self.relative_positions_encoding(to_seq_length)
300
+ query_layer_t = query_layer.permute(2, 0, 1, 3)
301
+
302
+ query_layer_r = query_layer_t.contiguous().view(
303
+ from_seq_length, batch_size * num_attention_heads, self.attention_head_size
304
+ )
305
+ key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
306
+ key_position_scores_r = key_position_scores.view(
307
+ from_seq_length, batch_size, num_attention_heads, from_seq_length
308
+ )
309
+ key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
310
+ attention_scores = attention_scores + key_position_scores_r_t
311
+
312
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
313
+
314
+ if attention_mask is not None:
315
+ # Apply the attention mask is (precomputed for all layers in NezhaModel forward() function)
316
+ attention_scores = attention_scores + attention_mask
317
+
318
+ # Normalize the attention scores to probabilities.
319
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
320
+
321
+ # This is actually dropping out entire tokens to attend to, which might
322
+ # seem a bit unusual, but is taken from the original Transformer paper.
323
+ attention_probs = self.dropout(attention_probs)
324
+
325
+ # Mask heads if we want to
326
+ if head_mask is not None:
327
+ attention_probs = attention_probs * head_mask
328
+
329
+ context_layer = torch.matmul(attention_probs, value_layer)
330
+ relations_values = self.relative_positions_encoding(to_seq_length)
331
+ attention_probs_t = attention_probs.permute(2, 0, 1, 3)
332
+ attentions_probs_r = attention_probs_t.contiguous().view(
333
+ from_seq_length, batch_size * num_attention_heads, to_seq_length
334
+ )
335
+ value_position_scores = torch.matmul(attentions_probs_r, relations_values)
336
+ value_position_scores_r = value_position_scores.view(
337
+ from_seq_length, batch_size, num_attention_heads, self.attention_head_size
338
+ )
339
+ value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
340
+ context_layer = context_layer + value_position_scores_r_t
341
+
342
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
343
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
344
+ context_layer = context_layer.view(new_context_layer_shape)
345
+
346
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
347
+
348
+ if self.is_decoder:
349
+ outputs = outputs + (past_key_value,)
350
+ return outputs
351
+
352
+
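Not part of the upstream file: the key-side relative-position term that the permute/view/matmul sequence above computes, rewritten as an equivalent (illustrative) einsum.

```python
import torch

batch_size, num_heads, seq_len, head_dim = 2, 2, 4, 8
query_layer = torch.randn(batch_size, num_heads, seq_len, head_dim)
relations_keys = torch.randn(seq_len, seq_len, head_dim)  # stands in for NezhaRelativePositionsEncoding(seq_len)

# key_position_scores[b, h, i, j] = sum_d query_layer[b, h, i, d] * relations_keys[i, j, d]
key_position_scores = torch.einsum("bhid,ijd->bhij", query_layer, relations_keys)
# this term is added to the usual query·key attention scores before scaling and softmax
```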
353
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Nezha
354
+ class NezhaSelfOutput(nn.Module):
355
+ def __init__(self, config):
356
+ super().__init__()
357
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
358
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
359
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
360
+
361
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
362
+ hidden_states = self.dense(hidden_states)
363
+ hidden_states = self.dropout(hidden_states)
364
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
365
+ return hidden_states
366
+
367
+
368
+ class NezhaAttention(nn.Module):
369
+ def __init__(self, config):
370
+ super().__init__()
371
+ self.self = NezhaSelfAttention(config)
372
+ self.output = NezhaSelfOutput(config)
373
+ self.pruned_heads = set()
374
+
375
+ def prune_heads(self, heads):
376
+ if len(heads) == 0:
377
+ return
378
+ heads, index = find_pruneable_heads_and_indices(
379
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
380
+ )
381
+
382
+ # Prune linear layers
383
+ self.self.query = prune_linear_layer(self.self.query, index)
384
+ self.self.key = prune_linear_layer(self.self.key, index)
385
+ self.self.value = prune_linear_layer(self.self.value, index)
386
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
387
+
388
+ # Update hyper params and store pruned heads
389
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
390
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
391
+ self.pruned_heads = self.pruned_heads.union(heads)
392
+
393
+ def forward(
394
+ self,
395
+ hidden_states: torch.Tensor,
396
+ attention_mask: Optional[torch.FloatTensor] = None,
397
+ head_mask: Optional[torch.FloatTensor] = None,
398
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
399
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
400
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
401
+ output_attentions: Optional[bool] = False,
402
+ ) -> Tuple[torch.Tensor]:
403
+ self_outputs = self.self(
404
+ hidden_states,
405
+ attention_mask,
406
+ head_mask,
407
+ encoder_hidden_states,
408
+ encoder_attention_mask,
409
+ past_key_value,
410
+ output_attentions,
411
+ )
412
+ attention_output = self.output(self_outputs[0], hidden_states)
413
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
414
+ return outputs
415
+
416
+
417
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Nezha
418
+ class NezhaIntermediate(nn.Module):
419
+ def __init__(self, config):
420
+ super().__init__()
421
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
422
+ if isinstance(config.hidden_act, str):
423
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
424
+ else:
425
+ self.intermediate_act_fn = config.hidden_act
426
+
427
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
428
+ hidden_states = self.dense(hidden_states)
429
+ hidden_states = self.intermediate_act_fn(hidden_states)
430
+ return hidden_states
431
+
432
+
433
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nezha
434
+ class NezhaOutput(nn.Module):
435
+ def __init__(self, config):
436
+ super().__init__()
437
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
438
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
439
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
440
+
441
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
442
+ hidden_states = self.dense(hidden_states)
443
+ hidden_states = self.dropout(hidden_states)
444
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
445
+ return hidden_states
446
+
447
+
448
+ class NezhaLayer(nn.Module):
449
+ def __init__(self, config):
450
+ super().__init__()
451
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
452
+ self.seq_len_dim = 1
453
+ self.attention = NezhaAttention(config)
454
+ self.is_decoder = config.is_decoder
455
+ self.add_cross_attention = config.add_cross_attention
456
+ if self.add_cross_attention:
457
+ if not self.is_decoder:
458
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
459
+ self.crossattention = NezhaAttention(config)
460
+ self.intermediate = NezhaIntermediate(config)
461
+ self.output = NezhaOutput(config)
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ attention_mask: Optional[torch.FloatTensor] = None,
467
+ head_mask: Optional[torch.FloatTensor] = None,
468
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
469
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
470
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
471
+ output_attentions: Optional[bool] = False,
472
+ ) -> Tuple[torch.Tensor]:
473
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
474
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
475
+ self_attention_outputs = self.attention(
476
+ hidden_states,
477
+ attention_mask,
478
+ head_mask,
479
+ output_attentions=output_attentions,
480
+ past_key_value=self_attn_past_key_value,
481
+ )
482
+ attention_output = self_attention_outputs[0]
483
+
484
+ # if decoder, the last output is tuple of self-attn cache
485
+ if self.is_decoder:
486
+ outputs = self_attention_outputs[1:-1]
487
+ present_key_value = self_attention_outputs[-1]
488
+ else:
489
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
490
+
491
+ cross_attn_present_key_value = None
492
+ if self.is_decoder and encoder_hidden_states is not None:
493
+ if not hasattr(self, "crossattention"):
494
+ raise ValueError(
495
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
496
+ " by setting `config.add_cross_attention=True`"
497
+ )
498
+
499
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
500
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
501
+ cross_attention_outputs = self.crossattention(
502
+ attention_output,
503
+ attention_mask,
504
+ head_mask,
505
+ encoder_hidden_states,
506
+ encoder_attention_mask,
507
+ cross_attn_past_key_value,
508
+ output_attentions,
509
+ )
510
+ attention_output = cross_attention_outputs[0]
511
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
512
+
513
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
514
+ cross_attn_present_key_value = cross_attention_outputs[-1]
515
+ present_key_value = present_key_value + cross_attn_present_key_value
516
+
517
+ layer_output = apply_chunking_to_forward(
518
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
519
+ )
520
+ outputs = (layer_output,) + outputs
521
+
522
+ # if decoder, return the attn key/values as the last output
523
+ if self.is_decoder:
524
+ outputs = outputs + (present_key_value,)
525
+
526
+ return outputs
527
+
528
+ def feed_forward_chunk(self, attention_output):
529
+ intermediate_output = self.intermediate(attention_output)
530
+ layer_output = self.output(intermediate_output, attention_output)
531
+ return layer_output
532
+
533
+
534
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Nezha
535
+ class NezhaEncoder(nn.Module):
536
+ def __init__(self, config):
537
+ super().__init__()
538
+ self.config = config
539
+ self.layer = nn.ModuleList([NezhaLayer(config) for _ in range(config.num_hidden_layers)])
540
+ self.gradient_checkpointing = False
541
+
542
+ def forward(
543
+ self,
544
+ hidden_states: torch.Tensor,
545
+ attention_mask: Optional[torch.FloatTensor] = None,
546
+ head_mask: Optional[torch.FloatTensor] = None,
547
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
548
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
549
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
550
+ use_cache: Optional[bool] = None,
551
+ output_attentions: Optional[bool] = False,
552
+ output_hidden_states: Optional[bool] = False,
553
+ return_dict: Optional[bool] = True,
554
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
555
+ all_hidden_states = () if output_hidden_states else None
556
+ all_self_attentions = () if output_attentions else None
557
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
558
+
559
+ if self.gradient_checkpointing and self.training:
560
+ if use_cache:
561
+ logger.warning_once(
562
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
563
+ )
564
+ use_cache = False
565
+
566
+ next_decoder_cache = () if use_cache else None
567
+ for i, layer_module in enumerate(self.layer):
568
+ if output_hidden_states:
569
+ all_hidden_states = all_hidden_states + (hidden_states,)
570
+
571
+ layer_head_mask = head_mask[i] if head_mask is not None else None
572
+ past_key_value = past_key_values[i] if past_key_values is not None else None
573
+
574
+ if self.gradient_checkpointing and self.training:
575
+ layer_outputs = self._gradient_checkpointing_func(
576
+ layer_module.__call__,
577
+ hidden_states,
578
+ attention_mask,
579
+ layer_head_mask,
580
+ encoder_hidden_states,
581
+ encoder_attention_mask,
582
+ past_key_value,
583
+ output_attentions,
584
+ )
585
+ else:
586
+ layer_outputs = layer_module(
587
+ hidden_states,
588
+ attention_mask,
589
+ layer_head_mask,
590
+ encoder_hidden_states,
591
+ encoder_attention_mask,
592
+ past_key_value,
593
+ output_attentions,
594
+ )
595
+
596
+ hidden_states = layer_outputs[0]
597
+ if use_cache:
598
+ next_decoder_cache += (layer_outputs[-1],)
599
+ if output_attentions:
600
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
601
+ if self.config.add_cross_attention:
602
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
603
+
604
+ if output_hidden_states:
605
+ all_hidden_states = all_hidden_states + (hidden_states,)
606
+
607
+ if not return_dict:
608
+ return tuple(
609
+ v
610
+ for v in [
611
+ hidden_states,
612
+ next_decoder_cache,
613
+ all_hidden_states,
614
+ all_self_attentions,
615
+ all_cross_attentions,
616
+ ]
617
+ if v is not None
618
+ )
619
+ return BaseModelOutputWithPastAndCrossAttentions(
620
+ last_hidden_state=hidden_states,
621
+ past_key_values=next_decoder_cache,
622
+ hidden_states=all_hidden_states,
623
+ attentions=all_self_attentions,
624
+ cross_attentions=all_cross_attentions,
625
+ )
626
+
627
+
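The encoder above forces `use_cache=False` whenever gradient checkpointing is active during training. A hedged sketch of how a user typically ends up on that code path (the tiny config values are illustrative):

```python
from transformers import NezhaConfig, NezhaForMaskedLM

# Tiny randomly initialized model, just to show the switch.
config = NezhaConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
)
model = NezhaForMaskedLM(config)

# Sets gradient_checkpointing=True on supporting submodules (NezhaEncoder here);
# while model.training is True the encoder will then log the warning above and
# silently drop use_cache.
model.gradient_checkpointing_enable()
model.train()
```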
628
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Nezha
629
+ class NezhaPooler(nn.Module):
630
+ def __init__(self, config):
631
+ super().__init__()
632
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
633
+ self.activation = nn.Tanh()
634
+
635
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
636
+ # We "pool" the model by simply taking the hidden state corresponding
637
+ # to the first token.
638
+ first_token_tensor = hidden_states[:, 0]
639
+ pooled_output = self.dense(first_token_tensor)
640
+ pooled_output = self.activation(pooled_output)
641
+ return pooled_output
642
+
643
+
644
+ # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nezha
645
+ class NezhaPredictionHeadTransform(nn.Module):
646
+ def __init__(self, config):
647
+ super().__init__()
648
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
649
+ if isinstance(config.hidden_act, str):
650
+ self.transform_act_fn = ACT2FN[config.hidden_act]
651
+ else:
652
+ self.transform_act_fn = config.hidden_act
653
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
654
+
655
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
656
+ hidden_states = self.dense(hidden_states)
657
+ hidden_states = self.transform_act_fn(hidden_states)
658
+ hidden_states = self.LayerNorm(hidden_states)
659
+ return hidden_states
660
+
661
+
662
+ # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Nezha
663
+ class NezhaLMPredictionHead(nn.Module):
664
+ def __init__(self, config):
665
+ super().__init__()
666
+ self.transform = NezhaPredictionHeadTransform(config)
667
+
668
+ # The output weights are the same as the input embeddings, but there is
669
+ # an output-only bias for each token.
670
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
671
+
672
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
673
+
674
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
675
+ self.decoder.bias = self.bias
676
+
677
+ def forward(self, hidden_states):
678
+ hidden_states = self.transform(hidden_states)
679
+ hidden_states = self.decoder(hidden_states)
680
+ return hidden_states
681
+
682
+
683
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nezha
684
+ class NezhaOnlyMLMHead(nn.Module):
685
+ def __init__(self, config):
686
+ super().__init__()
687
+ self.predictions = NezhaLMPredictionHead(config)
688
+
689
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
690
+ prediction_scores = self.predictions(sequence_output)
691
+ return prediction_scores
692
+
693
+
694
+ # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Nezha
695
+ class NezhaOnlyNSPHead(nn.Module):
696
+ def __init__(self, config):
697
+ super().__init__()
698
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
699
+
700
+ def forward(self, pooled_output):
701
+ seq_relationship_score = self.seq_relationship(pooled_output)
702
+ return seq_relationship_score
703
+
704
+
705
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Nezha
706
+ class NezhaPreTrainingHeads(nn.Module):
707
+ def __init__(self, config):
708
+ super().__init__()
709
+ self.predictions = NezhaLMPredictionHead(config)
710
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
711
+
712
+ def forward(self, sequence_output, pooled_output):
713
+ prediction_scores = self.predictions(sequence_output)
714
+ seq_relationship_score = self.seq_relationship(pooled_output)
715
+ return prediction_scores, seq_relationship_score
716
+
717
+
718
+ class NezhaPreTrainedModel(PreTrainedModel):
719
+ """
720
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
721
+ models.
722
+ """
723
+
724
+ config_class = NezhaConfig
725
+ load_tf_weights = load_tf_weights_in_nezha
726
+ base_model_prefix = "nezha"
727
+ supports_gradient_checkpointing = True
728
+
729
+ def _init_weights(self, module):
730
+ """Initialize the weights"""
731
+ if isinstance(module, nn.Linear):
732
+ # Slightly different from the TF version which uses truncated_normal for initialization
733
+ # cf https://github.com/pytorch/pytorch/pull/5617
734
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
735
+ if module.bias is not None:
736
+ module.bias.data.zero_()
737
+ elif isinstance(module, nn.Embedding):
738
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
739
+ if module.padding_idx is not None:
740
+ module.weight.data[module.padding_idx].zero_()
741
+ elif isinstance(module, nn.LayerNorm):
742
+ module.bias.data.zero_()
743
+ module.weight.data.fill_(1.0)
744
+
745
+
746
+ @dataclass
747
+ class NezhaForPreTrainingOutput(ModelOutput):
748
+ """
749
+ Output type of [`NezhaForPreTraining`].
750
+
751
+ Args:
752
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
753
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
754
+ (classification) loss.
755
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
756
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
757
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
758
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
759
+ before SoftMax).
760
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
761
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
762
+ shape `(batch_size, sequence_length, hidden_size)`.
763
+
764
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
765
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
766
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
767
+ sequence_length)`.
768
+
769
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
770
+ heads.
771
+ """
772
+
773
+ loss: Optional[torch.FloatTensor] = None
774
+ prediction_logits: torch.FloatTensor = None
775
+ seq_relationship_logits: torch.FloatTensor = None
776
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
777
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
778
+
779
+
780
+ NEZHA_START_DOCSTRING = r"""
781
+
782
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
783
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
784
+ etc.)
785
+
786
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
787
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
788
+ and behavior.
789
+
790
+ Parameters:
791
+ config ([`NezhaConfig`]): Model configuration class with all the parameters of the model.
792
+ Initializing with a config file does not load the weights associated with the model, only the
793
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
794
+ """
795
+
796
+ NEZHA_INPUTS_DOCSTRING = r"""
797
+ Args:
798
+ input_ids (`torch.LongTensor` of shape `({0})`):
799
+ Indices of input sequence tokens in the vocabulary.
800
+
801
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
802
+ [`PreTrainedTokenizer.__call__`] for details.
803
+
804
+ [What are input IDs?](../glossary#input-ids)
805
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
806
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
807
+
808
+ - 1 for tokens that are **not masked**,
809
+ - 0 for tokens that are **masked**.
810
+
811
+ [What are attention masks?](../glossary#attention-mask)
812
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
813
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
814
+ 1]`:
815
+
816
+ - 0 corresponds to a *sentence A* token,
817
+ - 1 corresponds to a *sentence B* token.
818
+
819
+ [What are token type IDs?](../glossary#token-type-ids)
820
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
821
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
822
+
823
+ - 1 indicates the head is **not masked**,
824
+ - 0 indicates the head is **masked**.
825
+
826
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
827
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
828
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
829
+ model's internal embedding lookup matrix.
830
+ output_attentions (`bool`, *optional*):
831
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
832
+ tensors for more detail.
833
+ output_hidden_states (`bool`, *optional*):
834
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
835
+ more detail.
836
+ return_dict (`bool`, *optional*):
837
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
838
+ """
839
+
840
+
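The inputs docstring above maps directly onto what a tokenizer returns for a sentence pair. A small sketch, assuming the `sijunhe/nezha-cn-base` checkpoint used in this file's own examples:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")

# A sentence pair yields all three tensors described above.
encoding = tokenizer("我爱北京", "天安门", return_tensors="pt")
print(encoding["input_ids"].shape)   # (batch_size, sequence_length)
print(encoding["attention_mask"])    # 1 = real token, 0 = padding
print(encoding["token_type_ids"])    # 0 = sentence A, 1 = sentence B
```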
841
+ @add_start_docstrings(
842
+ "The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.",
843
+ NEZHA_START_DOCSTRING,
844
+ )
845
+ class NezhaModel(NezhaPreTrainedModel):
846
+ """
847
+
848
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
849
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
850
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
851
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
852
+
853
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
855
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
856
+ """
857
+
858
+ def __init__(self, config, add_pooling_layer=True):
859
+ super().__init__(config)
860
+ self.config = config
861
+
862
+ self.embeddings = NezhaEmbeddings(config)
863
+ self.encoder = NezhaEncoder(config)
864
+
865
+ self.pooler = NezhaPooler(config) if add_pooling_layer else None
866
+
867
+ # Initialize weights and apply final processing
868
+ self.post_init()
869
+
870
+ def get_input_embeddings(self):
871
+ return self.embeddings.word_embeddings
872
+
873
+ def set_input_embeddings(self, value):
874
+ self.embeddings.word_embeddings = value
875
+
876
+ def _prune_heads(self, heads_to_prune):
877
+ """
878
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
879
+ class PreTrainedModel
880
+ """
881
+ for layer, heads in heads_to_prune.items():
882
+ self.encoder.layer[layer].attention.prune_heads(heads)
883
+
884
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
885
+ @add_code_sample_docstrings(
886
+ checkpoint=_CHECKPOINT_FOR_DOC,
887
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
888
+ config_class=_CONFIG_FOR_DOC,
889
+ )
890
+ def forward(
891
+ self,
892
+ input_ids: Optional[torch.Tensor] = None,
893
+ attention_mask: Optional[torch.Tensor] = None,
894
+ token_type_ids: Optional[torch.Tensor] = None,
895
+ head_mask: Optional[torch.Tensor] = None,
896
+ inputs_embeds: Optional[torch.Tensor] = None,
897
+ encoder_hidden_states: Optional[torch.Tensor] = None,
898
+ encoder_attention_mask: Optional[torch.Tensor] = None,
899
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
900
+ use_cache: Optional[bool] = None,
901
+ output_attentions: Optional[bool] = None,
902
+ output_hidden_states: Optional[bool] = None,
903
+ return_dict: Optional[bool] = None,
904
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
905
+ r"""
906
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
907
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
908
+ the model is configured as a decoder.
909
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
910
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
911
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
912
+
913
+ - 1 for tokens that are **not masked**,
914
+ - 0 for tokens that are **masked**.
915
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
916
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
917
+
918
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
919
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
920
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
921
+ use_cache (`bool`, *optional*):
922
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
923
+ `past_key_values`).
924
+ """
925
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
926
+ output_hidden_states = (
927
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
928
+ )
929
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
930
+
931
+ if self.config.is_decoder:
932
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
933
+ else:
934
+ use_cache = False
935
+
936
+ if input_ids is not None and inputs_embeds is not None:
937
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
938
+ elif input_ids is not None:
939
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
940
+ input_shape = input_ids.size()
941
+ elif inputs_embeds is not None:
942
+ input_shape = inputs_embeds.size()[:-1]
943
+ else:
944
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
945
+
946
+ batch_size, seq_length = input_shape
947
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
948
+
949
+ # past_key_values_length
950
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
951
+
952
+ if attention_mask is None:
953
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
954
+
955
+ if token_type_ids is None:
956
+ if hasattr(self.embeddings, "token_type_ids"):
957
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
958
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
959
+ token_type_ids = buffered_token_type_ids_expanded
960
+ else:
961
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
962
+
963
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
964
+ # ourselves in which case we just need to make it broadcastable to all heads.
965
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
966
+
967
+ # If a 2D or 3D attention mask is provided for the cross-attention
968
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
969
+ if self.config.is_decoder and encoder_hidden_states is not None:
970
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
971
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
972
+ if encoder_attention_mask is None:
973
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
974
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
975
+ else:
976
+ encoder_extended_attention_mask = None
977
+
978
+ # Prepare head mask if needed
979
+ # 1.0 in head_mask indicate we keep the head
980
+ # attention_probs has shape bsz x n_heads x N x N
981
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
982
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
983
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
984
+
985
+ embedding_output = self.embeddings(
986
+ input_ids=input_ids,
987
+ token_type_ids=token_type_ids,
988
+ inputs_embeds=inputs_embeds,
989
+ )
990
+ encoder_outputs = self.encoder(
991
+ embedding_output,
992
+ attention_mask=extended_attention_mask,
993
+ head_mask=head_mask,
994
+ encoder_hidden_states=encoder_hidden_states,
995
+ encoder_attention_mask=encoder_extended_attention_mask,
996
+ past_key_values=past_key_values,
997
+ use_cache=use_cache,
998
+ output_attentions=output_attentions,
999
+ output_hidden_states=output_hidden_states,
1000
+ return_dict=return_dict,
1001
+ )
1002
+ sequence_output = encoder_outputs[0]
1003
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1004
+
1005
+ if not return_dict:
1006
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1007
+
1008
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1009
+ last_hidden_state=sequence_output,
1010
+ pooler_output=pooled_output,
1011
+ past_key_values=encoder_outputs.past_key_values,
1012
+ hidden_states=encoder_outputs.hidden_states,
1013
+ attentions=encoder_outputs.attentions,
1014
+ cross_attentions=encoder_outputs.cross_attentions,
1015
+ )
1016
+
1017
+
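The class docstring above describes the encoder/decoder switch. A hedged sketch of setting the relevant flags on a tiny randomly initialized model (sizes are illustrative; in a full seq2seq setup you would also pass `encoder_hidden_states` with a matching `hidden_size`):

```python
import torch
from transformers import NezhaConfig, NezhaModel

config = NezhaConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
    is_decoder=True,           # causal self-attention + key/value caching
    add_cross_attention=True,  # adds the cross-attention sub-layer in each NezhaLayer
)
decoder = NezhaModel(config)

input_ids = torch.randint(0, 100, (1, 5))
outputs = decoder(input_ids=input_ids, use_cache=True)

print(outputs.last_hidden_state.shape)  # (1, 5, 32)
print(len(outputs.past_key_values))     # one cache tuple per layer
```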
1018
+ @add_start_docstrings(
1019
+ """
1020
+ Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
1021
+ sentence prediction (classification)` head.
1022
+ """,
1023
+ NEZHA_START_DOCSTRING,
1024
+ )
1025
+ class NezhaForPreTraining(NezhaPreTrainedModel):
1026
+ _tied_weights_keys = ["cls.predictions.decoder"]
1027
+
1028
+ def __init__(self, config):
1029
+ super().__init__(config)
1030
+
1031
+ self.nezha = NezhaModel(config)
1032
+ self.cls = NezhaPreTrainingHeads(config)
1033
+
1034
+ # Initialize weights and apply final processing
1035
+ self.post_init()
1036
+
1037
+ def get_output_embeddings(self):
1038
+ return self.cls.predictions.decoder
1039
+
1040
+ def set_output_embeddings(self, new_embeddings):
1041
+ self.cls.predictions.decoder = new_embeddings
1042
+
1043
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1044
+ @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1045
+ def forward(
1046
+ self,
1047
+ input_ids: Optional[torch.Tensor] = None,
1048
+ attention_mask: Optional[torch.Tensor] = None,
1049
+ token_type_ids: Optional[torch.Tensor] = None,
1050
+ head_mask: Optional[torch.Tensor] = None,
1051
+ inputs_embeds: Optional[torch.Tensor] = None,
1052
+ labels: Optional[torch.Tensor] = None,
1053
+ next_sentence_label: Optional[torch.Tensor] = None,
1054
+ output_attentions: Optional[bool] = None,
1055
+ output_hidden_states: Optional[bool] = None,
1056
+ return_dict: Optional[bool] = None,
1057
+ ) -> Union[Tuple[torch.Tensor], NezhaForPreTrainingOutput]:
1058
+ r"""
1059
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1060
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1061
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
1062
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1063
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1064
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
1065
+ pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1066
+
1067
+ - 0 indicates sequence B is a continuation of sequence A,
1068
+ - 1 indicates sequence B is a random sequence.
1069
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1070
+ Used to hide legacy arguments that have been deprecated.
1071
+
1072
+ Returns:
1073
+
1074
+ Example:
1075
+
1076
+ ```python
1077
+ >>> from transformers import AutoTokenizer, NezhaForPreTraining
1078
+ >>> import torch
1079
+
1080
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
1081
+ >>> model = NezhaForPreTraining.from_pretrained("sijunhe/nezha-cn-base")
1082
+
1083
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
1084
+ >>> outputs = model(**inputs)
1085
+
1086
+ >>> prediction_logits = outputs.prediction_logits
1087
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
1088
+ ```
1089
+ """
1090
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1091
+
1092
+ outputs = self.nezha(
1093
+ input_ids,
1094
+ attention_mask=attention_mask,
1095
+ token_type_ids=token_type_ids,
1096
+ head_mask=head_mask,
1097
+ inputs_embeds=inputs_embeds,
1098
+ output_attentions=output_attentions,
1099
+ output_hidden_states=output_hidden_states,
1100
+ return_dict=return_dict,
1101
+ )
1102
+
1103
+ sequence_output, pooled_output = outputs[:2]
1104
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
1105
+
1106
+ total_loss = None
1107
+ if labels is not None and next_sentence_label is not None:
1108
+ loss_fct = CrossEntropyLoss()
1109
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1110
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1111
+ total_loss = masked_lm_loss + next_sentence_loss
1112
+
1113
+ if not return_dict:
1114
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
1115
+ return ((total_loss,) + output) if total_loss is not None else output
1116
+
1117
+ return NezhaForPreTrainingOutput(
1118
+ loss=total_loss,
1119
+ prediction_logits=prediction_scores,
1120
+ seq_relationship_logits=seq_relationship_score,
1121
+ hidden_states=outputs.hidden_states,
1122
+ attentions=outputs.attentions,
1123
+ )
1124
+
1125
+
1126
+ @add_start_docstrings("""Nezha Model with a `language modeling` head on top.""", NEZHA_START_DOCSTRING)
1127
+ class NezhaForMaskedLM(NezhaPreTrainedModel):
1128
+ _tied_weights_keys = ["cls.predictions.decoder"]
1129
+
1130
+ def __init__(self, config):
1131
+ super().__init__(config)
1132
+
1133
+ if config.is_decoder:
1134
+ logger.warning(
1135
+ "If you want to use `NezhaForMaskedLM` make sure `config.is_decoder=False` for "
1136
+ "bi-directional self-attention."
1137
+ )
1138
+
1139
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1140
+ self.cls = NezhaOnlyMLMHead(config)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+ def get_output_embeddings(self):
1146
+ return self.cls.predictions.decoder
1147
+
1148
+ def set_output_embeddings(self, new_embeddings):
1149
+ self.cls.predictions.decoder = new_embeddings
1150
+
1151
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1152
+ @add_code_sample_docstrings(
1153
+ checkpoint=_CHECKPOINT_FOR_DOC,
1154
+ output_type=MaskedLMOutput,
1155
+ config_class=_CONFIG_FOR_DOC,
1156
+ )
1157
+ def forward(
1158
+ self,
1159
+ input_ids: Optional[torch.Tensor] = None,
1160
+ attention_mask: Optional[torch.Tensor] = None,
1161
+ token_type_ids: Optional[torch.Tensor] = None,
1162
+ head_mask: Optional[torch.Tensor] = None,
1163
+ inputs_embeds: Optional[torch.Tensor] = None,
1164
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1165
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1166
+ labels: Optional[torch.Tensor] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1171
+ r"""
1172
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1173
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1174
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1175
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1176
+ """
1177
+
1178
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1179
+
1180
+ outputs = self.nezha(
1181
+ input_ids,
1182
+ attention_mask=attention_mask,
1183
+ token_type_ids=token_type_ids,
1184
+ head_mask=head_mask,
1185
+ inputs_embeds=inputs_embeds,
1186
+ encoder_hidden_states=encoder_hidden_states,
1187
+ encoder_attention_mask=encoder_attention_mask,
1188
+ output_attentions=output_attentions,
1189
+ output_hidden_states=output_hidden_states,
1190
+ return_dict=return_dict,
1191
+ )
1192
+
1193
+ sequence_output = outputs[0]
1194
+ prediction_scores = self.cls(sequence_output)
1195
+
1196
+ masked_lm_loss = None
1197
+ if labels is not None:
1198
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1199
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1200
+
1201
+ if not return_dict:
1202
+ output = (prediction_scores,) + outputs[2:]
1203
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1204
+
1205
+ return MaskedLMOutput(
1206
+ loss=masked_lm_loss,
1207
+ logits=prediction_scores,
1208
+ hidden_states=outputs.hidden_states,
1209
+ attentions=outputs.attentions,
1210
+ )
1211
+
1212
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1213
+ input_shape = input_ids.shape
1214
+ effective_batch_size = input_shape[0]
1215
+
1216
+ # add a dummy token
1217
+ if self.config.pad_token_id is None:
1218
+ raise ValueError("The PAD token should be defined for generation")
1219
+
1220
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1221
+ dummy_token = torch.full(
1222
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1223
+ )
1224
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1225
+
1226
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1227
+
1228
+
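The `labels` docstring above relies on the usual `-100` ignore-index convention. A hedged sketch of building MLM labels by hand, reusing the checkpoint from this file's examples:

```python
import torch
from transformers import AutoTokenizer, NezhaForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")

inputs = tokenizer("巴黎是法国的首都", return_tensors="pt")
labels = inputs["input_ids"].clone()

# Mask one position in the inputs and keep its original id only in the labels;
# everything else becomes -100 so CrossEntropyLoss ignores it.
mask = torch.zeros_like(labels, dtype=torch.bool)
mask[0, 3] = True
inputs["input_ids"][mask] = tokenizer.mask_token_id
labels[~mask] = -100

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # scalar MLM loss over the single masked token
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```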
1229
+ @add_start_docstrings(
1230
+ """Nezha Model with a `next sentence prediction (classification)` head on top.""",
1231
+ NEZHA_START_DOCSTRING,
1232
+ )
1233
+ class NezhaForNextSentencePrediction(NezhaPreTrainedModel):
1234
+ def __init__(self, config):
1235
+ super().__init__(config)
1236
+
1237
+ self.nezha = NezhaModel(config)
1238
+ self.cls = NezhaOnlyNSPHead(config)
1239
+
1240
+ # Initialize weights and apply final processing
1241
+ self.post_init()
1242
+
1243
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1244
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
1245
+ def forward(
1246
+ self,
1247
+ input_ids: Optional[torch.Tensor] = None,
1248
+ attention_mask: Optional[torch.Tensor] = None,
1249
+ token_type_ids: Optional[torch.Tensor] = None,
1250
+ head_mask: Optional[torch.Tensor] = None,
1251
+ inputs_embeds: Optional[torch.Tensor] = None,
1252
+ labels: Optional[torch.Tensor] = None,
1253
+ output_attentions: Optional[bool] = None,
1254
+ output_hidden_states: Optional[bool] = None,
1255
+ return_dict: Optional[bool] = None,
1256
+ **kwargs,
1257
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
1258
+ r"""
1259
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1260
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
1261
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
1262
+
1263
+ - 0 indicates sequence B is a continuation of sequence A,
1264
+ - 1 indicates sequence B is a random sequence.
1265
+
1266
+ Returns:
1267
+
1268
+ Example:
1269
+
1270
+ ```python
1271
+ >>> from transformers import AutoTokenizer, NezhaForNextSentencePrediction
1272
+ >>> import torch
1273
+
1274
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
1275
+ >>> model = NezhaForNextSentencePrediction.from_pretrained("sijunhe/nezha-cn-base")
1276
+
1277
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1278
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1279
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
1280
+
1281
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
1282
+ >>> logits = outputs.logits
1283
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1284
+ ```
1285
+ """
1286
+
1287
+ if "next_sentence_label" in kwargs:
1288
+ warnings.warn(
1289
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
1290
+ " `labels` instead.",
1291
+ FutureWarning,
1292
+ )
1293
+ labels = kwargs.pop("next_sentence_label")
1294
+
1295
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1296
+
1297
+ outputs = self.nezha(
1298
+ input_ids,
1299
+ attention_mask=attention_mask,
1300
+ token_type_ids=token_type_ids,
1301
+ head_mask=head_mask,
1302
+ inputs_embeds=inputs_embeds,
1303
+ output_attentions=output_attentions,
1304
+ output_hidden_states=output_hidden_states,
1305
+ return_dict=return_dict,
1306
+ )
1307
+
1308
+ pooled_output = outputs[1]
1309
+
1310
+ seq_relationship_scores = self.cls(pooled_output)
1311
+
1312
+ next_sentence_loss = None
1313
+ if labels is not None:
1314
+ loss_fct = CrossEntropyLoss()
1315
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
1316
+
1317
+ if not return_dict:
1318
+ output = (seq_relationship_scores,) + outputs[2:]
1319
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
1320
+
1321
+ return NextSentencePredictorOutput(
1322
+ loss=next_sentence_loss,
1323
+ logits=seq_relationship_scores,
1324
+ hidden_states=outputs.hidden_states,
1325
+ attentions=outputs.attentions,
1326
+ )
1327
+
1328
+
1329
+ @add_start_docstrings(
1330
+ """
1331
+ Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1332
+ output) e.g. for GLUE tasks.
1333
+ """,
1334
+ NEZHA_START_DOCSTRING,
1335
+ )
1336
+ class NezhaForSequenceClassification(NezhaPreTrainedModel):
1337
+ def __init__(self, config):
1338
+ super().__init__(config)
1339
+ self.num_labels = config.num_labels
1340
+ self.config = config
1341
+
1342
+ self.nezha = NezhaModel(config)
1343
+ classifier_dropout = (
1344
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1345
+ )
1346
+ self.dropout = nn.Dropout(classifier_dropout)
1347
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1348
+
1349
+ # Initialize weights and apply final processing
1350
+ self.post_init()
1351
+
1352
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1353
+ @add_code_sample_docstrings(
1354
+ checkpoint=_CHECKPOINT_FOR_DOC,
1355
+ output_type=SequenceClassifierOutput,
1356
+ config_class=_CONFIG_FOR_DOC,
1357
+ )
1358
+ def forward(
1359
+ self,
1360
+ input_ids: Optional[torch.Tensor] = None,
1361
+ attention_mask: Optional[torch.Tensor] = None,
1362
+ token_type_ids: Optional[torch.Tensor] = None,
1363
+ head_mask: Optional[torch.Tensor] = None,
1364
+ inputs_embeds: Optional[torch.Tensor] = None,
1365
+ labels: Optional[torch.Tensor] = None,
1366
+ output_attentions: Optional[bool] = None,
1367
+ output_hidden_states: Optional[bool] = None,
1368
+ return_dict: Optional[bool] = None,
1369
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
1370
+ r"""
1371
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1372
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1373
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1374
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1375
+ """
1376
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1377
+
1378
+ outputs = self.nezha(
1379
+ input_ids,
1380
+ attention_mask=attention_mask,
1381
+ token_type_ids=token_type_ids,
1382
+ head_mask=head_mask,
1383
+ inputs_embeds=inputs_embeds,
1384
+ output_attentions=output_attentions,
1385
+ output_hidden_states=output_hidden_states,
1386
+ return_dict=return_dict,
1387
+ )
1388
+
1389
+ pooled_output = outputs[1]
1390
+
1391
+ pooled_output = self.dropout(pooled_output)
1392
+ logits = self.classifier(pooled_output)
1393
+
1394
+ loss = None
1395
+ if labels is not None:
1396
+ if self.config.problem_type is None:
1397
+ if self.num_labels == 1:
1398
+ self.config.problem_type = "regression"
1399
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1400
+ self.config.problem_type = "single_label_classification"
1401
+ else:
1402
+ self.config.problem_type = "multi_label_classification"
1403
+
1404
+ if self.config.problem_type == "regression":
1405
+ loss_fct = MSELoss()
1406
+ if self.num_labels == 1:
1407
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1408
+ else:
1409
+ loss = loss_fct(logits, labels)
1410
+ elif self.config.problem_type == "single_label_classification":
1411
+ loss_fct = CrossEntropyLoss()
1412
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1413
+ elif self.config.problem_type == "multi_label_classification":
1414
+ loss_fct = BCEWithLogitsLoss()
1415
+ loss = loss_fct(logits, labels)
1416
+ if not return_dict:
1417
+ output = (logits,) + outputs[2:]
1418
+ return ((loss,) + output) if loss is not None else output
1419
+
1420
+ return SequenceClassifierOutput(
1421
+ loss=loss,
1422
+ logits=logits,
1423
+ hidden_states=outputs.hidden_states,
1424
+ attentions=outputs.attentions,
1425
+ )
1426
+
1427
+
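The `labels` docstring above compresses three loss branches into `config.problem_type`. A hedged sketch of how the branch is inferred, on a tiny randomly initialized model (all sizes illustrative):

```python
import torch
from transformers import NezhaConfig, NezhaForSequenceClassification

config = NezhaConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64, num_labels=3,
)
model = NezhaForSequenceClassification(config)
input_ids = torch.randint(0, 100, (2, 6))

# Integer labels with num_labels > 1 -> "single_label_classification" (CrossEntropyLoss).
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))
print(model.config.problem_type, out.loss)

# Float multi-hot labels -> "multi_label_classification" (BCEWithLogitsLoss).
model.config.problem_type = None  # reset so the branch is inferred again
out = model(input_ids=input_ids, labels=torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]))
print(model.config.problem_type, out.loss)
```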
1428
+ @add_start_docstrings(
1429
+ """
1430
+ Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
1431
+ softmax) e.g. for RocStories/SWAG tasks.
1432
+ """,
1433
+ NEZHA_START_DOCSTRING,
1434
+ )
1435
+ class NezhaForMultipleChoice(NezhaPreTrainedModel):
1436
+ def __init__(self, config):
1437
+ super().__init__(config)
1438
+
1439
+ self.nezha = NezhaModel(config)
1440
+ classifier_dropout = (
1441
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1442
+ )
1443
+ self.dropout = nn.Dropout(classifier_dropout)
1444
+ self.classifier = nn.Linear(config.hidden_size, 1)
1445
+
1446
+ # Initialize weights and apply final processing
1447
+ self.post_init()
1448
+
1449
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
1450
+ @add_code_sample_docstrings(
1451
+ checkpoint=_CHECKPOINT_FOR_DOC,
1452
+ output_type=MultipleChoiceModelOutput,
1453
+ config_class=_CONFIG_FOR_DOC,
1454
+ )
1455
+ def forward(
1456
+ self,
1457
+ input_ids: Optional[torch.Tensor] = None,
1458
+ attention_mask: Optional[torch.Tensor] = None,
1459
+ token_type_ids: Optional[torch.Tensor] = None,
1460
+ head_mask: Optional[torch.Tensor] = None,
1461
+ inputs_embeds: Optional[torch.Tensor] = None,
1462
+ labels: Optional[torch.Tensor] = None,
1463
+ output_attentions: Optional[bool] = None,
1464
+ output_hidden_states: Optional[bool] = None,
1465
+ return_dict: Optional[bool] = None,
1466
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
1467
+ r"""
1468
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1469
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1470
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1471
+ `input_ids` above)
1472
+ """
1473
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1474
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1475
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1476
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1477
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1478
+ inputs_embeds = (
1479
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1480
+ if inputs_embeds is not None
1481
+ else None
1482
+ )
1483
+
1484
+ outputs = self.nezha(
1485
+ input_ids,
1486
+ attention_mask=attention_mask,
1487
+ token_type_ids=token_type_ids,
1488
+ head_mask=head_mask,
1489
+ inputs_embeds=inputs_embeds,
1490
+ output_attentions=output_attentions,
1491
+ output_hidden_states=output_hidden_states,
1492
+ return_dict=return_dict,
1493
+ )
1494
+
1495
+ pooled_output = outputs[1]
1496
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
1501
+ reshaped_logits = logits.view(-1, num_choices)
1502
+
1503
+ loss = None
1504
+ if labels is not None:
1505
+ loss_fct = CrossEntropyLoss()
1506
+ loss = loss_fct(reshaped_logits, labels)
1507
+
1508
+ if not return_dict:
1509
+ output = (reshaped_logits,) + outputs[2:]
1510
+ return ((loss,) + output) if loss is not None else output
1511
+
1512
+ return MultipleChoiceModelOutput(
1513
+ loss=loss,
1514
+ logits=reshaped_logits,
1515
+ hidden_states=outputs.hidden_states,
1516
+ attentions=outputs.attentions,
1517
+ )
1518
+
1519
+
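`NezhaForMultipleChoice.forward` above flattens `(batch, num_choices, seq_len)` inputs to `(batch * num_choices, seq_len)` and reshapes the logits back to one score per choice. A hedged usage sketch with the checkpoint from this file's examples (the choice head is freshly initialized, so the scores themselves are not meaningful):

```python
import torch
from transformers import AutoTokenizer, NezhaForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaForMultipleChoice.from_pretrained("sijunhe/nezha-cn-base")

prompt = "天空是什么颜色的？"
choices = ["蓝色", "石头"]

# Encode (prompt, choice) pairs, then add the num_choices dimension forward() expects.
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {name: tensor.unsqueeze(0) for name, tensor in enc.items()}  # (1, 2, seq_len)

outputs = model(**inputs, labels=torch.tensor([0]))
print(outputs.logits.shape)  # (1, 2): one score per choice
print(outputs.loss)
```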
1520
+ @add_start_docstrings(
1521
+ """
1522
+ Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1523
+ Named-Entity-Recognition (NER) tasks.
1524
+ """,
1525
+ NEZHA_START_DOCSTRING,
1526
+ )
1527
+ class NezhaForTokenClassification(NezhaPreTrainedModel):
1528
+ def __init__(self, config):
1529
+ super().__init__(config)
1530
+ self.num_labels = config.num_labels
1531
+
1532
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1533
+ classifier_dropout = (
1534
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1535
+ )
1536
+ self.dropout = nn.Dropout(classifier_dropout)
1537
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1538
+
1539
+ # Initialize weights and apply final processing
1540
+ self.post_init()
1541
+
1542
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1543
+ @add_code_sample_docstrings(
1544
+ checkpoint=_CHECKPOINT_FOR_DOC,
1545
+ output_type=TokenClassifierOutput,
1546
+ config_class=_CONFIG_FOR_DOC,
1547
+ )
1548
+ def forward(
1549
+ self,
1550
+ input_ids: Optional[torch.Tensor] = None,
1551
+ attention_mask: Optional[torch.Tensor] = None,
1552
+ token_type_ids: Optional[torch.Tensor] = None,
1553
+ head_mask: Optional[torch.Tensor] = None,
1554
+ inputs_embeds: Optional[torch.Tensor] = None,
1555
+ labels: Optional[torch.Tensor] = None,
1556
+ output_attentions: Optional[bool] = None,
1557
+ output_hidden_states: Optional[bool] = None,
1558
+ return_dict: Optional[bool] = None,
1559
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1560
+ r"""
1561
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1562
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1563
+ """
1564
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1565
+
1566
+ outputs = self.nezha(
1567
+ input_ids,
1568
+ attention_mask=attention_mask,
1569
+ token_type_ids=token_type_ids,
1570
+ head_mask=head_mask,
1571
+ inputs_embeds=inputs_embeds,
1572
+ output_attentions=output_attentions,
1573
+ output_hidden_states=output_hidden_states,
1574
+ return_dict=return_dict,
1575
+ )
1576
+
1577
+ sequence_output = outputs[0]
1578
+
1579
+ sequence_output = self.dropout(sequence_output)
1580
+ logits = self.classifier(sequence_output)
1581
+
1582
+ loss = None
1583
+ if labels is not None:
1584
+ loss_fct = CrossEntropyLoss()
1585
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1586
+
1587
+ if not return_dict:
1588
+ output = (logits,) + outputs[2:]
1589
+ return ((loss,) + output) if loss is not None else output
1590
+
1591
+ return TokenClassifierOutput(
1592
+ loss=loss,
1593
+ logits=logits,
1594
+ hidden_states=outputs.hidden_states,
1595
+ attentions=outputs.attentions,
1596
+ )
1597
+
1598
+
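A hedged sketch of token-level usage for the class above, again reusing the base checkpoint from this file's examples (its token-classification head is randomly initialized, so the predicted label ids are not meaningful):

```python
import torch
from transformers import AutoTokenizer, NezhaForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaForTokenClassification.from_pretrained("sijunhe/nezha-cn-base", num_labels=5)

inputs = tokenizer("华为总部位于深圳", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits       # (batch, seq_len, num_labels)

predictions = logits.argmax(dim=-1)       # one label id per token
print(predictions.shape)
```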
1599
+ @add_start_docstrings(
1600
+ """
1601
+ Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1602
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1603
+ """,
1604
+ NEZHA_START_DOCSTRING,
1605
+ )
1606
+ class NezhaForQuestionAnswering(NezhaPreTrainedModel):
1607
+ def __init__(self, config):
1608
+ super().__init__(config)
1609
+ self.num_labels = config.num_labels
1610
+
1611
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
1612
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1613
+
1614
+ # Initialize weights and apply final processing
1615
+ self.post_init()
1616
+
1617
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1618
+ @add_code_sample_docstrings(
1619
+ checkpoint=_CHECKPOINT_FOR_DOC,
1620
+ output_type=QuestionAnsweringModelOutput,
1621
+ config_class=_CONFIG_FOR_DOC,
1622
+ )
1623
+ def forward(
1624
+ self,
1625
+ input_ids: Optional[torch.Tensor] = None,
1626
+ attention_mask: Optional[torch.Tensor] = None,
1627
+ token_type_ids: Optional[torch.Tensor] = None,
1628
+ head_mask: Optional[torch.Tensor] = None,
1629
+ inputs_embeds: Optional[torch.Tensor] = None,
1630
+ start_positions: Optional[torch.Tensor] = None,
1631
+ end_positions: Optional[torch.Tensor] = None,
1632
+ output_attentions: Optional[bool] = None,
1633
+ output_hidden_states: Optional[bool] = None,
1634
+ return_dict: Optional[bool] = None,
1635
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1636
+ r"""
1637
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1638
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1639
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1640
+ are not taken into account for computing the loss.
1641
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1642
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1643
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1644
+ are not taken into account for computing the loss.
1645
+ """
1646
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1647
+
1648
+ outputs = self.nezha(
1649
+ input_ids,
1650
+ attention_mask=attention_mask,
1651
+ token_type_ids=token_type_ids,
1652
+ head_mask=head_mask,
1653
+ inputs_embeds=inputs_embeds,
1654
+ output_attentions=output_attentions,
1655
+ output_hidden_states=output_hidden_states,
1656
+ return_dict=return_dict,
1657
+ )
1658
+
1659
+ sequence_output = outputs[0]
1660
+
1661
+ logits = self.qa_outputs(sequence_output)
1662
+ start_logits, end_logits = logits.split(1, dim=-1)
1663
+ start_logits = start_logits.squeeze(-1).contiguous()
1664
+ end_logits = end_logits.squeeze(-1).contiguous()
1665
+
1666
+ total_loss = None
1667
+ if start_positions is not None and end_positions is not None:
1668
+ # If we are on multi-GPU, the split can add an extra dimension; squeeze it
1669
+ if len(start_positions.size()) > 1:
1670
+ start_positions = start_positions.squeeze(-1)
1671
+ if len(end_positions.size()) > 1:
1672
+ end_positions = end_positions.squeeze(-1)
1673
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1674
+ ignored_index = start_logits.size(1)
1675
+ start_positions = start_positions.clamp(0, ignored_index)
1676
+ end_positions = end_positions.clamp(0, ignored_index)
1677
+
1678
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1679
+ start_loss = loss_fct(start_logits, start_positions)
1680
+ end_loss = loss_fct(end_logits, end_positions)
1681
+ total_loss = (start_loss + end_loss) / 2
1682
+
1683
+ if not return_dict:
1684
+ output = (start_logits, end_logits) + outputs[2:]
1685
+ return ((total_loss,) + output) if total_loss is not None else output
1686
+
1687
+ return QuestionAnsweringModelOutput(
1688
+ loss=total_loss,
1689
+ start_logits=start_logits,
1690
+ end_logits=end_logits,
1691
+ hidden_states=outputs.hidden_states,
1692
+ attentions=outputs.attentions,
1693
+ )
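`NezhaForQuestionAnswering` above returns independent start and end logits. A hedged sketch of decoding them into a span, using the base checkpoint from this file's examples (its QA head is randomly initialized, so the extracted span is not meaningful):

```python
import torch
from transformers import AutoTokenizer, NezhaForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
model = NezhaForQuestionAnswering.from_pretrained("sijunhe/nezha-cn-base")

question, context = "法国的首都是哪里？", "法国的首都是巴黎。"
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```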
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py ADDED
@@ -0,0 +1,96 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tokenizers_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
27
+ "tokenization_perceiver": ["PerceiverTokenizer"],
28
+ }
29
+
30
+ try:
31
+ if not is_vision_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
37
+ _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
38
+
39
+ try:
40
+ if not is_torch_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_perceiver"] = [
46
+ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "PerceiverForImageClassificationConvProcessing",
48
+ "PerceiverForImageClassificationFourier",
49
+ "PerceiverForImageClassificationLearned",
50
+ "PerceiverForMaskedLM",
51
+ "PerceiverForMultimodalAutoencoding",
52
+ "PerceiverForOpticalFlow",
53
+ "PerceiverForSequenceClassification",
54
+ "PerceiverLayer",
55
+ "PerceiverModel",
56
+ "PerceiverPreTrainedModel",
57
+ ]
58
+
59
+
60
+ if TYPE_CHECKING:
61
+ from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
62
+ from .tokenization_perceiver import PerceiverTokenizer
63
+
64
+ try:
65
+ if not is_vision_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .feature_extraction_perceiver import PerceiverFeatureExtractor
71
+ from .image_processing_perceiver import PerceiverImageProcessor
72
+
73
+ try:
74
+ if not is_torch_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ from .modeling_perceiver import (
80
+ PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
81
+ PerceiverForImageClassificationConvProcessing,
82
+ PerceiverForImageClassificationFourier,
83
+ PerceiverForImageClassificationLearned,
84
+ PerceiverForMaskedLM,
85
+ PerceiverForMultimodalAutoencoding,
86
+ PerceiverForOpticalFlow,
87
+ PerceiverForSequenceClassification,
88
+ PerceiverLayer,
89
+ PerceiverModel,
90
+ PerceiverPreTrainedModel,
91
+ )
92
+
93
+ else:
94
+ import sys
95
+
96
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
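The `_LazyModule` registration above defers the heavy `modeling_perceiver` import until one of its symbols is actually resolved. A hedged usage sketch follows; it assumes `transformers` is installed with PyTorch, and the tiny hyperparameters are arbitrary, chosen only to keep the example fast.

```python
# Sketch: thanks to the lazy import structure above, these names are cheap to
# import; the PyTorch modeling code is loaded only when PerceiverModel is used.
from transformers import PerceiverConfig, PerceiverModel

config = PerceiverConfig(num_latents=32, d_latents=64, d_model=64, num_self_attends_per_block=2)
model = PerceiverModel(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```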
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc ADDED
Binary file (9.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc ADDED
Binary file (1.05 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc ADDED
Binary file (105 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc ADDED
Binary file (7.17 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/configuration_perceiver.py ADDED
@@ -0,0 +1,244 @@
1
+ # coding=utf-8
2
+ # Copyright Deepmind and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Perceiver model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional, Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...feature_extraction_utils import FeatureExtractionMixin
22
+ from ...onnx import OnnxConfig
23
+ from ...onnx.utils import compute_effective_axis_dimension
24
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
25
+ from ...utils import TensorType, logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class PerceiverConfig(PretrainedConfig):
35
+ r"""
36
+ This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
37
+ Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
38
+ configuration with the defaults will yield a similar configuration to that of the Perceiver
39
+ [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+ Args:
45
+ num_latents (`int`, *optional*, defaults to 256):
46
+ The number of latents.
47
+ d_latents (`int`, *optional*, defaults to 1280):
48
+ Dimension of the latent embeddings.
49
+ d_model (`int`, *optional*, defaults to 768):
50
+ Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
51
+ preprocessor is provided.
52
+ num_blocks (`int`, *optional*, defaults to 1):
53
+ Number of blocks in the Transformer encoder.
54
+ num_self_attends_per_block (`int`, *optional*, defaults to 26):
55
+ The number of self-attention layers per block.
56
+ num_self_attention_heads (`int`, *optional*, defaults to 8):
57
+ Number of attention heads for each self-attention layer in the Transformer encoder.
58
+ num_cross_attention_heads (`int`, *optional*, defaults to 8):
59
+ Number of attention heads for each cross-attention layer in the Transformer encoder.
60
+ qk_channels (`int`, *optional*):
61
+ Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
62
+ layers of the encoder. Will default to preserving the dimension of the queries if not specified.
63
+ v_channels (`int`, *optional*):
64
+ Dimension to project the values before applying attention in the cross-attention and self-attention layers
65
+ of the encoder. Will default to preserving the dimension of the queries if not specified.
66
+ cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
67
+ Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
68
+ self_attention_widening_factor (`int`, *optional*, defaults to 1):
69
+ Widening factor of the feed-forward layer in the self-attention layers of the Transformer encoder.
70
+ cross_attention_widening_factor (`int`, *optional*, defaults to 1):
71
+ Widening factor of the feed-forward layer in the cross-attention layer of the Transformer encoder.
72
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
75
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
76
+ The dropout ratio for the attention probabilities.
77
+ initializer_range (`float`, *optional*, defaults to 0.02):
78
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
79
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
80
+ The epsilon used by the layer normalization layers.
81
+ use_query_residual (`float`, *optional*, defaults to `True`):
82
+ Whether to add a query residual in the cross-attention layer of the encoder.
83
+ vocab_size (`int`, *optional*, defaults to 262):
84
+ Vocabulary size for the masked language modeling model.
85
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
86
+ The maximum sequence length that the masked language modeling model might ever be used with. Typically set
87
+ this to something large just in case (e.g., 512 or 1024 or 2048).
88
+ image_size (`int`, *optional*, defaults to 56):
89
+ Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
90
+ train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
91
+ Training size of the images for the optical flow model.
92
+ num_frames (`int`, *optional*, defaults to 16):
93
+ Number of video frames used for the multimodal autoencoding model.
94
+ audio_samples_per_frame (`int`, *optional*, defaults to 1920):
95
+ Number of audio samples per frame for the multimodal autoencoding model.
96
+ samples_per_patch (`int`, *optional*, defaults to 16):
97
+ Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
98
+ output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
99
+ Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
100
+ autoencoding model. This excludes the channel dimension.
101
+ output_num_channels (`int`, *optional*, defaults to 512):
102
+ Number of output channels for each modality decoder.
103
+
104
+ Example:
105
+
106
+ ```python
107
+ >>> from transformers import PerceiverModel, PerceiverConfig
108
+
109
+ >>> # Initializing a Perceiver deepmind/language-perceiver style configuration
110
+ >>> configuration = PerceiverConfig()
111
+
112
+ >>> # Initializing a model from the deepmind/language-perceiver style configuration
113
+ >>> model = PerceiverModel(configuration)
114
+
115
+ >>> # Accessing the model configuration
116
+ >>> configuration = model.config
117
+ ```"""
118
+
119
+ model_type = "perceiver"
120
+
121
+ def __init__(
122
+ self,
123
+ num_latents=256,
124
+ d_latents=1280,
125
+ d_model=768,
126
+ num_blocks=1,
127
+ num_self_attends_per_block=26,
128
+ num_self_attention_heads=8,
129
+ num_cross_attention_heads=8,
130
+ qk_channels=None,
131
+ v_channels=None,
132
+ cross_attention_shape_for_attention="kv",
133
+ self_attention_widening_factor=1,
134
+ cross_attention_widening_factor=1,
135
+ hidden_act="gelu",
136
+ attention_probs_dropout_prob=0.1,
137
+ initializer_range=0.02,
138
+ layer_norm_eps=1e-12,
139
+ use_query_residual=True,
140
+ vocab_size=262,
141
+ max_position_embeddings=2048,
142
+ image_size=56,
143
+ train_size=[368, 496],
144
+ num_frames=16,
145
+ audio_samples_per_frame=1920,
146
+ samples_per_patch=16,
147
+ output_shape=[1, 16, 224, 224],
148
+ output_num_channels=512,
149
+ _label_trainable_num_channels=1024,
150
+ **kwargs,
151
+ ):
152
+ super().__init__(**kwargs)
153
+
154
+ self.num_latents = num_latents
155
+ self.d_latents = d_latents
156
+ self.d_model = d_model
157
+ self.num_blocks = num_blocks
158
+ self.num_self_attends_per_block = num_self_attends_per_block
159
+ self.num_self_attention_heads = num_self_attention_heads
160
+ self.num_cross_attention_heads = num_cross_attention_heads
161
+ self.qk_channels = qk_channels
162
+ self.v_channels = v_channels
163
+ self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
164
+ self.self_attention_widening_factor = self_attention_widening_factor
165
+ self.cross_attention_widening_factor = cross_attention_widening_factor
166
+ self.hidden_act = hidden_act
167
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
168
+ self.initializer_range = initializer_range
169
+ self.layer_norm_eps = layer_norm_eps
170
+ self.use_query_residual = use_query_residual
171
+ # masked language modeling attributes
172
+ self.vocab_size = vocab_size
173
+ self.max_position_embeddings = max_position_embeddings
174
+ # image classification attributes
175
+ self.image_size = image_size
176
+ # flow attributes
177
+ self.train_size = train_size
178
+ # multimodal autoencoding attributes
179
+ self.num_frames = num_frames
180
+ self.audio_samples_per_frame = audio_samples_per_frame
181
+ self.samples_per_patch = samples_per_patch
182
+ self.output_shape = output_shape
183
+ self.output_num_channels = output_num_channels
184
+ self._label_trainable_num_channels = _label_trainable_num_channels
185
+
186
+
187
+ class PerceiverOnnxConfig(OnnxConfig):
188
+ @property
189
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
190
+ if self.task == "multiple-choice":
191
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
192
+ else:
193
+ dynamic_axis = {0: "batch", 1: "sequence"}
194
+ return OrderedDict(
195
+ [
196
+ ("inputs", dynamic_axis),
197
+ ("attention_mask", dynamic_axis),
198
+ ]
199
+ )
200
+
201
+ @property
202
+ def atol_for_validation(self) -> float:
203
+ return 1e-4
204
+
205
+ def generate_dummy_inputs(
206
+ self,
207
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
208
+ batch_size: int = -1,
209
+ seq_length: int = -1,
210
+ num_choices: int = -1,
211
+ is_pair: bool = False,
212
+ framework: Optional[TensorType] = None,
213
+ num_channels: int = 3,
214
+ image_width: int = 40,
215
+ image_height: int = 40,
216
+ ) -> Mapping[str, Any]:
217
+ # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
218
+
219
+ if isinstance(preprocessor, PreTrainedTokenizerBase):
220
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
221
+ batch_size = compute_effective_axis_dimension(
222
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
223
+ )
224
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
225
+ token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
226
+ seq_length = compute_effective_axis_dimension(
227
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
228
+ )
229
+ # Generate dummy inputs according to compute batch and sequence
230
+ dummy_input = [" ".join(["a"]) * seq_length] * batch_size
231
+ inputs = dict(preprocessor(dummy_input, return_tensors=framework))
232
+ inputs["inputs"] = inputs.pop("input_ids")
233
+ return inputs
234
+ elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
235
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
236
+ batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
237
+ dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
238
+ inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
239
+ inputs["inputs"] = inputs.pop("pixel_values")
240
+ return inputs
241
+ else:
242
+ raise ValueError(
243
+ "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
244
+ )
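`PerceiverOnnxConfig.generate_dummy_inputs` above renames `input_ids`/`pixel_values` to the Perceiver-specific `inputs` key. A hedged sketch of driving it with the byte-level tokenizer follows; it assumes network access to the `deepmind/language-perceiver` checkpoint on the Hub and PyTorch installed.

```python
# Sketch: build dummy export inputs for the text Perceiver via the OnnxConfig above.
from transformers import PerceiverConfig, PerceiverOnnxConfig, PerceiverTokenizer

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
onnx_config = PerceiverOnnxConfig(PerceiverConfig(), task="default")

dummy = onnx_config.generate_dummy_inputs(preprocessor=tokenizer, framework="pt")
print(sorted(dummy.keys()))   # ['attention_mask', 'inputs']
print(dummy["inputs"].shape)  # fixed batch and sequence sizes chosen by the helper
```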
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py ADDED
@@ -0,0 +1,468 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Perceiver checkpoints originally implemented in Haiku."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import pickle
21
+ from pathlib import Path
22
+
23
+ import haiku as hk
24
+ import numpy as np
25
+ import requests
26
+ import torch
27
+ from huggingface_hub import hf_hub_download
28
+ from PIL import Image
29
+
30
+ from transformers import (
31
+ PerceiverConfig,
32
+ PerceiverForImageClassificationConvProcessing,
33
+ PerceiverForImageClassificationFourier,
34
+ PerceiverForImageClassificationLearned,
35
+ PerceiverForMaskedLM,
36
+ PerceiverForMultimodalAutoencoding,
37
+ PerceiverForOpticalFlow,
38
+ PerceiverImageProcessor,
39
+ PerceiverTokenizer,
40
+ )
41
+ from transformers.utils import logging
42
+
43
+
44
+ logging.set_verbosity_info()
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ def prepare_img():
49
+ # We will verify our results on an image of a dog
50
+ url = "https://storage.googleapis.com/perceiver_io/dalmation.jpg"
51
+ im = Image.open(requests.get(url, stream=True).raw)
52
+ return im
53
+
54
+
55
+ def rename_keys(state_dict, architecture):
56
+ for name in list(state_dict):
57
+ param = state_dict.pop(name)
58
+
59
+ # PREPROCESSORS
60
+ # rename text preprocessor embeddings (for MLM model)
61
+ name = name.replace("embed/embeddings", "input_preprocessor.embeddings.weight")
62
+ if name.startswith("trainable_position_encoding/pos_embs"):
63
+ name = name.replace(
64
+ "trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.weight"
65
+ )
66
+
67
+ # rename image preprocessor embeddings (for image classification model with learned position embeddings)
68
+ name = name.replace("image_preprocessor/~/conv2_d/w", "input_preprocessor.convnet_1x1.weight")
69
+ name = name.replace("image_preprocessor/~/conv2_d/b", "input_preprocessor.convnet_1x1.bias")
70
+ name = name.replace(
71
+ "image_preprocessor/~_build_network_inputs/trainable_position_encoding/pos_embs",
72
+ "input_preprocessor.position_embeddings.position_embeddings",
73
+ )
74
+ name = name.replace(
75
+ "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/w",
76
+ "input_preprocessor.positions_projection.weight",
77
+ )
78
+ name = name.replace(
79
+ "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/b",
80
+ "input_preprocessor.positions_projection.bias",
81
+ )
82
+
83
+ # rename image preprocessor embeddings (for image classification model with conv processing)
84
+ if "counter" in name or "hidden" in name:
85
+ continue
86
+ name = name.replace(
87
+ "image_preprocessor/~/conv2_d_downsample/~/conv/w", "input_preprocessor.convnet.conv.weight"
88
+ )
89
+ name = name.replace(
90
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/offset", "input_preprocessor.convnet.batchnorm.bias"
91
+ )
92
+ name = name.replace(
93
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/scale", "input_preprocessor.convnet.batchnorm.weight"
94
+ )
95
+ name = name.replace(
96
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/mean_ema/average",
97
+ "input_preprocessor.convnet.batchnorm.running_mean",
98
+ )
99
+ name = name.replace(
100
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/var_ema/average",
101
+ "input_preprocessor.convnet.batchnorm.running_var",
102
+ )
103
+
104
+ # rename image preprocessor embeddings (for optical flow model)
105
+ name = name.replace("image_preprocessor/patches_linear/b", "input_preprocessor.conv_after_patches.bias")
106
+ name = name.replace("image_preprocessor/patches_linear/w", "input_preprocessor.conv_after_patches.weight")
107
+
108
+ # rename multimodal preprocessor embeddings
109
+ name = name.replace("multimodal_preprocessor/audio_mask_token/pos_embs", "input_preprocessor.mask.audio")
110
+ name = name.replace("multimodal_preprocessor/audio_padding/pos_embs", "input_preprocessor.padding.audio")
111
+ name = name.replace("multimodal_preprocessor/image_mask_token/pos_embs", "input_preprocessor.mask.image")
112
+ name = name.replace("multimodal_preprocessor/image_padding/pos_embs", "input_preprocessor.padding.image")
113
+ name = name.replace("multimodal_preprocessor/label_mask_token/pos_embs", "input_preprocessor.mask.label")
114
+ name = name.replace("multimodal_preprocessor/label_padding/pos_embs", "input_preprocessor.padding.label")
115
+
116
+ # DECODERS
117
+ # rename prefix of decoders
118
+ # multimodal autoencoding model
119
+ name = name.replace(
120
+ "multimodal_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
121
+ )
122
+ name = name.replace("multimodal_decoder/~decoder_query/audio_padding/pos_embs", "decoder.padding.audio")
123
+ name = name.replace("multimodal_decoder/~decoder_query/image_padding/pos_embs", "decoder.padding.image")
124
+ name = name.replace("multimodal_decoder/~decoder_query/label_padding/pos_embs", "decoder.padding.label")
125
+ name = name.replace("multimodal_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
126
+ name = name.replace("multimodal_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
127
+ if architecture == "multimodal_autoencoding":
128
+ name = name.replace(
129
+ "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs",
130
+ "decoder.modalities.label.decoder.output_position_encodings.position_embeddings",
131
+ )
132
+ # flow model
133
+ name = name.replace(
134
+ "flow_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
135
+ )
136
+ name = name.replace("flow_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
137
+ name = name.replace("flow_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
138
+ # image models
139
+ name = name.replace(
140
+ "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs",
141
+ "decoder.decoder.output_position_encodings.position_embeddings",
142
+ )
143
+ name = name.replace(
144
+ "basic_decoder/~/trainable_position_encoding/pos_embs",
145
+ "decoder.output_position_encodings.position_embeddings",
146
+ )
147
+ name = name.replace(
148
+ "classification_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
149
+ )
150
+ name = name.replace("classification_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
151
+ name = name.replace("classification_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
152
+ name = name.replace("classification_decoder/~/basic_decoder/~/", "decoder.decoder.")
153
+ name = name.replace("basic_decoder/cross_attention/", "decoder.decoding_cross_attention.")
154
+ name = name.replace("basic_decoder/~/", "decoder.")
155
+
156
+ # POSTPROCESSORS
157
+ name = name.replace(
158
+ "projection_postprocessor/linear/b", "output_postprocessor.modalities.image.classifier.bias"
159
+ )
160
+ name = name.replace(
161
+ "projection_postprocessor/linear/w", "output_postprocessor.modalities.image.classifier.weight"
162
+ )
163
+ name = name.replace(
164
+ "classification_postprocessor/linear/b", "output_postprocessor.modalities.label.classifier.bias"
165
+ )
166
+ name = name.replace(
167
+ "classification_postprocessor/linear/w", "output_postprocessor.modalities.label.classifier.weight"
168
+ )
169
+ name = name.replace("audio_postprocessor/linear/b", "output_postprocessor.modalities.audio.classifier.bias")
170
+ name = name.replace("audio_postprocessor/linear/w", "output_postprocessor.modalities.audio.classifier.weight")
171
+
172
+ # PERCEIVER MODEL
173
+
174
+ # rename latent embeddings
175
+ name = name.replace("perceiver_encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")
176
+ # rename latent embeddings (for multimodal model)
177
+ name = name.replace("encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")
178
+
179
+ # rename prefixes
180
+ if name.startswith("perceiver_encoder/~/"):
181
+ if "self_attention" in name:
182
+ suffix = "self_attends."
183
+ else:
184
+ suffix = ""
185
+ name = name.replace("perceiver_encoder/~/", "encoder." + suffix)
186
+ if name.startswith("encoder/~/"):
187
+ if "self_attention" in name:
188
+ suffix = "self_attends."
189
+ else:
190
+ suffix = ""
191
+ name = name.replace("encoder/~/", "encoder." + suffix)
192
+ # rename layernorm parameters
193
+ if "offset" in name:
194
+ name = name.replace("offset", "bias")
195
+ if "scale" in name:
196
+ name = name.replace("scale", "weight")
197
+ # in HuggingFace, the layernorm in between attention + MLP is just called "layernorm"
198
+ # rename layernorm in between attention + MLP of cross-attention
199
+ if "cross_attention" in name and "layer_norm_2" in name:
200
+ name = name.replace("layer_norm_2", "layernorm")
201
+ # rename layernorm in between attention + MLP of self-attention
202
+ if "self_attention" in name and "layer_norm_1" in name:
203
+ name = name.replace("layer_norm_1", "layernorm")
204
+
205
+ # in HuggingFace, the layernorms for queries + keys are called "layernorm1" and "layernorm2"
206
+ if "cross_attention" in name and "layer_norm_1" in name:
207
+ name = name.replace("layer_norm_1", "attention.self.layernorm2")
208
+ if "cross_attention" in name and "layer_norm" in name:
209
+ name = name.replace("layer_norm", "attention.self.layernorm1")
210
+ if "self_attention" in name and "layer_norm" in name:
211
+ name = name.replace("layer_norm", "attention.self.layernorm1")
212
+
213
+ # rename special characters by dots
214
+ name = name.replace("-", ".")
215
+ name = name.replace("/", ".")
216
+ # rename keys, queries, values and output of attention layers
217
+ if ("cross_attention" in name or "self_attention" in name) and "mlp" not in name:
218
+ if "linear.b" in name:
219
+ name = name.replace("linear.b", "self.query.bias")
220
+ if "linear.w" in name:
221
+ name = name.replace("linear.w", "self.query.weight")
222
+ if "linear_1.b" in name:
223
+ name = name.replace("linear_1.b", "self.key.bias")
224
+ if "linear_1.w" in name:
225
+ name = name.replace("linear_1.w", "self.key.weight")
226
+ if "linear_2.b" in name:
227
+ name = name.replace("linear_2.b", "self.value.bias")
228
+ if "linear_2.w" in name:
229
+ name = name.replace("linear_2.w", "self.value.weight")
230
+ if "linear_3.b" in name:
231
+ name = name.replace("linear_3.b", "output.dense.bias")
232
+ if "linear_3.w" in name:
233
+ name = name.replace("linear_3.w", "output.dense.weight")
234
+ if "self_attention_" in name:
235
+ name = name.replace("self_attention_", "")
236
+ if "self_attention" in name:
237
+ name = name.replace("self_attention", "0")
238
+ # rename dense layers of 2-layer MLP
239
+ if "mlp" in name:
240
+ if "linear.b" in name:
241
+ name = name.replace("linear.b", "dense1.bias")
242
+ if "linear.w" in name:
243
+ name = name.replace("linear.w", "dense1.weight")
244
+ if "linear_1.b" in name:
245
+ name = name.replace("linear_1.b", "dense2.bias")
246
+ if "linear_1.w" in name:
247
+ name = name.replace("linear_1.w", "dense2.weight")
248
+
249
+ # finally, TRANSPOSE if kernel and not embedding layer, and set value
250
+ if name[-6:] == "weight" and "embeddings" not in name:
251
+ param = np.transpose(param)
252
+
253
+ # if batchnorm, we need to squeeze it
254
+ if "batchnorm" in name:
255
+ param = np.squeeze(param)
256
+
257
+ if "embedding_decoder" not in name:
258
+ state_dict["perceiver." + name] = torch.from_numpy(param)
259
+ else:
260
+ state_dict[name] = torch.from_numpy(param)
261
+
262
+
263
+ @torch.no_grad()
264
+ def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architecture="MLM"):
265
+ """
266
+ Copy/paste/tweak model's weights to our Perceiver structure.
267
+ """
268
+
269
+ # load parameters as FlatMapping data structure
270
+ with open(pickle_file, "rb") as f:
271
+ checkpoint = pickle.loads(f.read())
272
+
273
+ state = None
274
+ if isinstance(checkpoint, dict) and architecture in [
275
+ "image_classification",
276
+ "image_classification_fourier",
277
+ "image_classification_conv",
278
+ ]:
279
+ # the image classification_conv checkpoint also has batchnorm states (running_mean and running_var)
280
+ params = checkpoint["params"]
281
+ state = checkpoint["state"]
282
+ else:
283
+ params = checkpoint
284
+
285
+ # turn into initial state dict
286
+ state_dict = {}
287
+ for scope_name, parameters in hk.data_structures.to_mutable_dict(params).items():
288
+ for param_name, param in parameters.items():
289
+ state_dict[scope_name + "/" + param_name] = param
290
+
291
+ if state is not None:
292
+ # add state variables
293
+ for scope_name, parameters in hk.data_structures.to_mutable_dict(state).items():
294
+ for param_name, param in parameters.items():
295
+ state_dict[scope_name + "/" + param_name] = param
296
+
297
+ # rename keys
298
+ rename_keys(state_dict, architecture=architecture)
299
+
300
+ # load HuggingFace model
301
+ config = PerceiverConfig()
302
+ subsampling = None
303
+ repo_id = "huggingface/label-files"
304
+ if architecture == "MLM":
305
+ config.qk_channels = 8 * 32
306
+ config.v_channels = 1280
307
+ model = PerceiverForMaskedLM(config)
308
+ elif "image_classification" in architecture:
309
+ config.num_latents = 512
310
+ config.d_latents = 1024
311
+ config.d_model = 512
312
+ config.num_blocks = 8
313
+ config.num_self_attends_per_block = 6
314
+ config.num_cross_attention_heads = 1
315
+ config.num_self_attention_heads = 8
316
+ config.qk_channels = None
317
+ config.v_channels = None
318
+ # set labels
319
+ config.num_labels = 1000
320
+ filename = "imagenet-1k-id2label.json"
321
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
322
+ id2label = {int(k): v for k, v in id2label.items()}
323
+ config.id2label = id2label
324
+ config.label2id = {v: k for k, v in id2label.items()}
325
+ if architecture == "image_classification":
326
+ config.image_size = 224
327
+ model = PerceiverForImageClassificationLearned(config)
328
+ elif architecture == "image_classification_fourier":
329
+ config.d_model = 261
330
+ model = PerceiverForImageClassificationFourier(config)
331
+ elif architecture == "image_classification_conv":
332
+ config.d_model = 322
333
+ model = PerceiverForImageClassificationConvProcessing(config)
334
+ else:
335
+ raise ValueError(f"Architecture {architecture} not supported")
336
+ elif architecture == "optical_flow":
337
+ config.num_latents = 2048
338
+ config.d_latents = 512
339
+ config.d_model = 322
340
+ config.num_blocks = 1
341
+ config.num_self_attends_per_block = 24
342
+ config.num_self_attention_heads = 16
343
+ config.num_cross_attention_heads = 1
344
+ model = PerceiverForOpticalFlow(config)
345
+ elif architecture == "multimodal_autoencoding":
346
+ config.num_latents = 28 * 28 * 1
347
+ config.d_latents = 512
348
+ config.d_model = 704
349
+ config.num_blocks = 1
350
+ config.num_self_attends_per_block = 8
351
+ config.num_self_attention_heads = 8
352
+ config.num_cross_attention_heads = 1
353
+ config.num_labels = 700
354
+ # define dummy inputs + subsampling (as each forward pass is only on a chunk of image + audio data)
355
+ images = torch.randn((1, 16, 3, 224, 224))
356
+ audio = torch.randn((1, 30720, 1))
357
+ nchunks = 128
358
+ image_chunk_size = np.prod((16, 224, 224)) // nchunks
359
+ audio_chunk_size = audio.shape[1] // config.samples_per_patch // nchunks
360
+ # process the first chunk
361
+ chunk_idx = 0
362
+ subsampling = {
363
+ "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
364
+ "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
365
+ "label": None,
366
+ }
367
+ model = PerceiverForMultimodalAutoencoding(config)
368
+ # set labels
369
+ filename = "kinetics700-id2label.json"
370
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
371
+ id2label = {int(k): v for k, v in id2label.items()}
372
+ config.id2label = id2label
373
+ config.label2id = {v: k for k, v in id2label.items()}
374
+ else:
375
+ raise ValueError(f"Architecture {architecture} not supported")
376
+ model.eval()
377
+
378
+ # load weights
379
+ model.load_state_dict(state_dict)
380
+
381
+ # prepare dummy input
382
+ input_mask = None
383
+ if architecture == "MLM":
384
+ tokenizer = PerceiverTokenizer.from_pretrained("/Users/NielsRogge/Documents/Perceiver/Tokenizer files")
385
+ text = "This is an incomplete sentence where some words are missing."
386
+ encoding = tokenizer(text, padding="max_length", return_tensors="pt")
387
+ # mask " missing.". Note that the model performs much better if the masked chunk starts with a space.
388
+ encoding.input_ids[0, 51:60] = tokenizer.mask_token_id
389
+ inputs = encoding.input_ids
390
+ input_mask = encoding.attention_mask
391
+ elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]:
392
+ image_processor = PerceiverImageProcessor()
393
+ image = prepare_img()
394
+ encoding = image_processor(image, return_tensors="pt")
395
+ inputs = encoding.pixel_values
396
+ elif architecture == "optical_flow":
397
+ inputs = torch.randn(1, 2, 27, 368, 496)
398
+ elif architecture == "multimodal_autoencoding":
399
+ images = torch.randn((1, 16, 3, 224, 224))
400
+ audio = torch.randn((1, 30720, 1))
401
+ inputs = {"image": images, "audio": audio, "label": torch.zeros((images.shape[0], 700))}
402
+
403
+ # forward pass
404
+ if architecture == "multimodal_autoencoding":
405
+ outputs = model(inputs=inputs, attention_mask=input_mask, subsampled_output_points=subsampling)
406
+ else:
407
+ outputs = model(inputs=inputs, attention_mask=input_mask)
408
+ logits = outputs.logits
409
+
410
+ # verify logits
411
+ if not isinstance(logits, dict):
412
+ print("Shape of logits:", logits.shape)
413
+ else:
414
+ for k, v in logits.items():
415
+ print(f"Shape of logits of modality {k}", v.shape)
416
+
417
+ if architecture == "MLM":
418
+ expected_slice = torch.tensor(
419
+ [[-11.8336, -11.6850, -11.8483], [-12.8149, -12.5863, -12.7904], [-12.8440, -12.6410, -12.8646]]
420
+ )
421
+ assert torch.allclose(logits[0, :3, :3], expected_slice)
422
+ masked_tokens_predictions = logits[0, 51:60].argmax(dim=-1).tolist()
423
+ expected_list = [38, 115, 111, 121, 121, 111, 116, 109, 52]
424
+ assert masked_tokens_predictions == expected_list
425
+ print("Greedy predictions:")
426
+ print(masked_tokens_predictions)
427
+ print()
428
+ print("Predicted string:")
429
+ print(tokenizer.decode(masked_tokens_predictions))
430
+
431
+ elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]:
432
+ print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
433
+
434
+ # Finally, save files
435
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
436
+ print(f"Saving model to {pytorch_dump_folder_path}")
437
+ model.save_pretrained(pytorch_dump_folder_path)
438
+
439
+
440
+ if __name__ == "__main__":
441
+ parser = argparse.ArgumentParser()
442
+ # Required parameters
443
+ parser.add_argument(
444
+ "--pickle_file",
445
+ type=str,
446
+ default=None,
447
+ required=True,
448
+ help="Path to local pickle file of a Perceiver checkpoint you'd like to convert.",
449
+ )
450
+ parser.add_argument(
451
+ "--pytorch_dump_folder_path",
452
+ default=None,
453
+ type=str,
454
+ required=True,
455
+ help="Path to the output PyTorch model directory, provided as a string.",
456
+ )
457
+ parser.add_argument(
458
+ "--architecture",
459
+ default="MLM",
460
+ type=str,
461
+ help="""
462
+ Architecture, provided as a string. One of 'MLM', 'image_classification', image_classification_fourier',
463
+ image_classification_fourier', 'optical_flow' or 'multimodal_autoencoding'.
464
+ """,
465
+ )
466
+
467
+ args = parser.parse_args()
468
+ convert_perceiver_checkpoint(args.pickle_file, args.pytorch_dump_folder_path, args.architecture)
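Most of the renaming logic above is bookkeeping; the one numerical subtlety is the final transpose of every non-embedding `weight`. The small illustration below is a hedged sketch, not part of the conversion script: Haiku's `Linear` stores kernels as `(in_features, out_features)`, whereas `torch.nn.Linear.weight` is `(out_features, in_features)`, hence the `np.transpose` before loading.

```python
# Sketch: the (in, out) -> (out, in) kernel transpose that rename_keys applies
# to every non-embedding "weight" before it is loaded into a torch.nn.Linear.
import numpy as np
import torch

haiku_kernel = np.random.randn(16, 32).astype(np.float32)  # Haiku layout: (in_features, out_features)

linear = torch.nn.Linear(16, 32, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(np.transpose(haiku_kernel)))  # torch layout: (out, in)

x = torch.randn(4, 16)
# The converted layer reproduces the original Haiku computation x @ haiku_kernel.
print(torch.allclose(linear(x), x @ torch.from_numpy(haiku_kernel), atol=1e-6))
```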
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/feature_extraction_perceiver.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for Perceiver."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_perceiver import PerceiverImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class PerceiverFeatureExtractor(PerceiverImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use PerceiverImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
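The thin subclass above exists purely as a deprecation shim. A hedged sketch of what callers see follows; it assumes `transformers` is installed with the vision extras (Pillow).

```python
# Sketch: the deprecated alias still behaves like PerceiverImageProcessor,
# but constructing it emits a FutureWarning nudging callers to migrate.
import warnings

from transformers import PerceiverFeatureExtractor, PerceiverImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = PerceiverFeatureExtractor()

print(isinstance(extractor, PerceiverImageProcessor))              # True
print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
```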
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/image_processing_perceiver.py ADDED
@@ -0,0 +1,367 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Perceiver."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import center_crop, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ IMAGENET_DEFAULT_MEAN,
25
+ IMAGENET_DEFAULT_STD,
26
+ ChannelDimension,
27
+ ImageInput,
28
+ PILImageResampling,
29
+ get_image_size,
30
+ infer_channel_dimension_format,
31
+ is_scaled_image,
32
+ make_list_of_images,
33
+ to_numpy_array,
34
+ valid_images,
35
+ validate_kwargs,
36
+ validate_preprocess_arguments,
37
+ )
38
+ from ...utils import TensorType, is_vision_available, logging
39
+
40
+
41
+ if is_vision_available():
42
+ import PIL
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ class PerceiverImageProcessor(BaseImageProcessor):
49
+ r"""
50
+ Constructs a Perceiver image processor.
51
+
52
+ Args:
53
+ do_center_crop (`bool`, `optional`, defaults to `True`):
54
+ Whether or not to center crop the image. If the input size if smaller than `crop_size` along any edge, the
55
+ image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
56
+ parameter in the `preprocess` method.
57
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
58
+ Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
59
+ `preprocess` method.
60
+ do_resize (`bool`, *optional*, defaults to `True`):
61
+ Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
62
+ parameter in the `preprocess` method.
63
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
64
+ Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
65
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
66
+ Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
67
+ in the `preprocess` method.
68
+ do_rescale (`bool`, *optional*, defaults to `True`):
69
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
70
+ parameter in the `preprocess` method.
71
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
72
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
73
+ in the `preprocess` method.
74
+ do_normalize:
75
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
76
+ method.
77
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
78
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
79
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
80
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
81
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
82
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
83
+ """
84
+
85
+ model_input_names = ["pixel_values"]
86
+
87
+ def __init__(
88
+ self,
89
+ do_center_crop: bool = True,
90
+ crop_size: Dict[str, int] = None,
91
+ do_resize: bool = True,
92
+ size: Dict[str, int] = None,
93
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
94
+ do_rescale: bool = True,
95
+ rescale_factor: Union[int, float] = 1 / 255,
96
+ do_normalize: bool = True,
97
+ image_mean: Optional[Union[float, List[float]]] = None,
98
+ image_std: Optional[Union[float, List[float]]] = None,
99
+ **kwargs,
100
+ ) -> None:
101
+ super().__init__(**kwargs)
102
+ crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
103
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
104
+ size = size if size is not None else {"height": 224, "width": 224}
105
+ size = get_size_dict(size)
106
+
107
+ self.do_center_crop = do_center_crop
108
+ self.crop_size = crop_size
109
+ self.do_resize = do_resize
110
+ self.size = size
111
+ self.resample = resample
112
+ self.do_rescale = do_rescale
113
+ self.rescale_factor = rescale_factor
114
+ self.do_normalize = do_normalize
115
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
116
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
117
+ self._valid_processor_keys = [
118
+ "images",
119
+ "do_center_crop",
120
+ "crop_size",
121
+ "do_resize",
122
+ "size",
123
+ "resample",
124
+ "do_rescale",
125
+ "rescale_factor",
126
+ "do_normalize",
127
+ "image_mean",
128
+ "image_std",
129
+ "return_tensors",
130
+ "data_format",
131
+ "input_data_format",
132
+ ]
133
+
134
+ def center_crop(
135
+ self,
136
+ image: np.ndarray,
137
+ crop_size: Dict[str, int],
138
+ size: Optional[int] = None,
139
+ data_format: Optional[Union[str, ChannelDimension]] = None,
140
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ **kwargs,
142
+ ) -> np.ndarray:
143
+ """
144
+ Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
145
+ min_dim)`. Where `min_dim = min(size["height"], size["width"])`.
146
+
147
+ If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
148
+ center cropped.
149
+
150
+ Args:
151
+ image (`np.ndarray`):
152
+ Image to center crop.
153
+ crop_size (`Dict[str, int]`):
154
+ Desired output size after applying the center crop.
155
+ size (`Dict[str, int]`, *optional*):
156
+ Size of the image after resizing. If not provided, the self.size attribute will be used.
157
+ data_format (`str` or `ChannelDimension`, *optional*):
158
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
159
+ input_data_format (`str` or `ChannelDimension`, *optional*):
160
+ The channel dimension format of the input image. If not provided, it will be inferred.
161
+ """
162
+ size = self.size if size is None else size
163
+ size = get_size_dict(size)
164
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
165
+
166
+ height, width = get_image_size(image, channel_dim=input_data_format)
167
+ min_dim = min(height, width)
168
+ cropped_height = (size["height"] / crop_size["height"]) * min_dim
169
+ cropped_width = (size["width"] / crop_size["width"]) * min_dim
170
+ return center_crop(
171
+ image,
172
+ size=(cropped_height, cropped_width),
173
+ data_format=data_format,
174
+ input_data_format=input_data_format,
175
+ **kwargs,
176
+ )
177
+
178
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
179
+ def resize(
180
+ self,
181
+ image: np.ndarray,
182
+ size: Dict[str, int],
183
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
184
+ data_format: Optional[Union[str, ChannelDimension]] = None,
185
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
186
+ **kwargs,
187
+ ) -> np.ndarray:
188
+ """
189
+ Resize an image to `(size["height"], size["width"])`.
190
+
191
+ Args:
192
+ image (`np.ndarray`):
193
+ Image to resize.
194
+ size (`Dict[str, int]`):
195
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
196
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
197
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
198
+ data_format (`ChannelDimension` or `str`, *optional*):
199
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
200
+ image is used. Can be one of:
201
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
202
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
203
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
204
+ input_data_format (`ChannelDimension` or `str`, *optional*):
205
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
206
+ from the input image. Can be one of:
207
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
208
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
209
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
210
+
211
+ Returns:
212
+ `np.ndarray`: The resized image.
213
+ """
214
+ size = get_size_dict(size)
215
+ if "height" not in size or "width" not in size:
216
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
217
+ output_size = (size["height"], size["width"])
218
+ return resize(
219
+ image,
220
+ size=output_size,
221
+ resample=resample,
222
+ data_format=data_format,
223
+ input_data_format=input_data_format,
224
+ **kwargs,
225
+ )
226
+
227
+ def preprocess(
228
+ self,
229
+ images: ImageInput,
230
+ do_center_crop: Optional[bool] = None,
231
+ crop_size: Optional[Dict[str, int]] = None,
232
+ do_resize: Optional[bool] = None,
233
+ size: Optional[Dict[str, int]] = None,
234
+ resample: PILImageResampling = None,
235
+ do_rescale: Optional[bool] = None,
236
+ rescale_factor: Optional[float] = None,
237
+ do_normalize: Optional[bool] = None,
238
+ image_mean: Optional[Union[float, List[float]]] = None,
239
+ image_std: Optional[Union[float, List[float]]] = None,
240
+ return_tensors: Optional[Union[str, TensorType]] = None,
241
+ data_format: ChannelDimension = ChannelDimension.FIRST,
242
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
243
+ **kwargs,
244
+ ) -> PIL.Image.Image:
245
+ """
246
+ Preprocess an image or batch of images.
247
+
248
+ Args:
249
+ images (`ImageInput`):
250
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
251
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
252
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
253
+ Whether to center crop the image to `crop_size`.
254
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
255
+ Desired output size after applying the center crop.
256
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
257
+ Whether to resize the image.
258
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
259
+ Size of the image after resizing.
260
+ resample (`int`, *optional*, defaults to `self.resample`):
261
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
262
+ has an effect if `do_resize` is set to `True`.
263
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
264
+ Whether to rescale the image.
265
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
266
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
267
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
268
+ Whether to normalize the image.
269
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
270
+ Image mean.
271
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
272
+ Image standard deviation.
273
+ return_tensors (`str` or `TensorType`, *optional*):
274
+ The type of tensors to return. Can be one of:
275
+ - Unset: Return a list of `np.ndarray`.
276
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
277
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
278
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
279
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
280
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
281
+ The channel dimension format for the output image. Can be one of:
282
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
283
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
284
+ input_data_format (`ChannelDimension` or `str`, *optional*):
285
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
286
+ from the input image. Can be one of:
287
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
288
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
289
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
290
+ """
291
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
292
+ crop_size = crop_size if crop_size is not None else self.crop_size
293
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
294
+ do_resize = do_resize if do_resize is not None else self.do_resize
295
+ size = size if size is not None else self.size
296
+ size = get_size_dict(size)
297
+ resample = resample if resample is not None else self.resample
298
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
299
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
300
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
301
+ image_mean = image_mean if image_mean is not None else self.image_mean
302
+ image_std = image_std if image_std is not None else self.image_std
303
+
304
+ images = make_list_of_images(images)
305
+
306
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
307
+
308
+ if not valid_images(images):
309
+ raise ValueError(
310
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
311
+ "torch.Tensor, tf.Tensor or jax.ndarray."
312
+ )
313
+ validate_preprocess_arguments(
314
+ do_rescale=do_rescale,
315
+ rescale_factor=rescale_factor,
316
+ do_normalize=do_normalize,
317
+ image_mean=image_mean,
318
+ image_std=image_std,
319
+ do_center_crop=do_center_crop,
320
+ crop_size=crop_size,
321
+ do_resize=do_resize,
322
+ size=size,
323
+ resample=resample,
324
+ )
325
+
326
+ # All transformations expect numpy arrays.
327
+ images = [to_numpy_array(image) for image in images]
328
+
329
+ if is_scaled_image(images[0]) and do_rescale:
330
+ logger.warning_once(
331
+ "It looks like you are trying to rescale already rescaled images. If the input"
332
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
333
+ )
334
+
335
+ if input_data_format is None:
336
+ # We assume that all images have the same channel dimension format.
337
+ input_data_format = infer_channel_dimension_format(images[0])
338
+
339
+ if do_center_crop:
340
+ images = [
341
+ self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images
342
+ ]
343
+
344
+ if do_resize:
345
+ images = [
346
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
347
+ for image in images
348
+ ]
349
+
350
+ if do_rescale:
351
+ images = [
352
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
353
+ for image in images
354
+ ]
355
+
356
+ if do_normalize:
357
+ images = [
358
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
359
+ for image in images
360
+ ]
361
+
362
+ images = [
363
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
364
+ ]
365
+
366
+ data = {"pixel_values": images}
367
+ return BatchFeature(data=data, tensor_type=return_tensors)
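End to end, the `preprocess` pipeline above center-crops, resizes, rescales and normalizes. Below is a hedged sketch with a random image; it assumes Pillow and PyTorch, the input size is arbitrary, and with the default `crop_size`/`size` the output should come out at 224x224.

```python
# Sketch: run the Perceiver image pipeline above on a random PIL image.
import numpy as np
from PIL import Image

from transformers import PerceiverImageProcessor

image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))

processor = PerceiverImageProcessor()  # defaults: crop relative to 256, resize to 224, rescale, normalize
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])
```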
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/modeling_perceiver.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/models/perceiver/tokenization_perceiver.py ADDED
@@ -0,0 +1,198 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for Perceiver."""
16
+
17
+
18
+ from typing import Dict, List, Optional, Tuple
19
+
20
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ class PerceiverTokenizer(PreTrainedTokenizer):
28
+ """
29
+ Construct a Perceiver tokenizer. The Perceiver simply uses raw bytes utf-8 encoding.
30
+
31
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
32
+ this superclass for more information regarding those methods.
33
+
34
+ Args:
35
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
36
+ The token used for padding, for example when batching sequences of different lengths.
37
+ bos_token (`str`, *optional*, defaults to `"[BOS]"`):
38
+ The BOS token (reserved in the vocab, but not actually used).
39
+ eos_token (`str`, *optional*, defaults to `"[EOS]"`):
40
+ The end of sequence token (reserved in the vocab, but not actually used).
41
+
42
+ <Tip>
43
+
44
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
45
+ The token used is the `sep_token`.
46
+
47
+ </Tip>
48
+
49
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
50
+ The MASK token, useful for masked language modeling.
51
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
52
+ The CLS token (reserved in the vocab, but not actually used).
53
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
54
+ The separator token, which is used when building a sequence from two sequences.
55
+
56
+ """
57
+
58
+ model_input_names = ["input_ids", "attention_mask"]
59
+
60
+ def __init__(
61
+ self,
62
+ pad_token="[PAD]",
63
+ bos_token="[BOS]",
64
+ eos_token="[EOS]",
65
+ mask_token="[MASK]",
66
+ cls_token="[CLS]",
67
+ sep_token="[SEP]",
68
+ model_max_length=2048,
69
+ **kwargs,
70
+ ) -> None:
71
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
72
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
73
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
74
+ mask_token = AddedToken(mask_token, lstrip=False, rstrip=False) if isinstance(mask_token, str) else mask_token
75
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
76
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
77
+
78
+ self._utf_vocab_size = 2**8 # utf is 8 bits
79
+
80
+ # Since these tokens are not part of the vocabulary, we manually add them
81
+ self._added_tokens_decoder: Dict[int, AddedToken] = {
82
+ 0: pad_token,
83
+ 1: bos_token,
84
+ 2: eos_token,
85
+ 3: mask_token,
86
+ 4: cls_token,
87
+ 5: sep_token,
88
+ }
89
+ self._num_special_tokens = len(self._added_tokens_decoder)
90
+ super().__init__(
91
+ pad_token=pad_token,
92
+ bos_token=bos_token,
93
+ eos_token=eos_token,
94
+ mask_token=mask_token,
95
+ cls_token=cls_token,
96
+ sep_token=sep_token,
97
+ model_max_length=model_max_length,
98
+ **kwargs,
99
+ )
100
+
101
+ def get_vocab(self) -> Dict[str, int]:
102
+ vocab = {}
103
+ for i in range(self._utf_vocab_size):
104
+ token = chr(i)
105
+ vocab[token] = i + self._num_special_tokens
106
+ vocab.update(self.added_tokens_encoder)
107
+ return vocab
108
+
109
+ @property
110
+ def vocab_size(self):
111
+ return self._utf_vocab_size
112
+
113
+ def get_special_tokens_mask(
114
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
115
+ ) -> List[int]:
116
+ """
117
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
118
+ special tokens using the tokenizer `prepare_for_model` method.
119
+
120
+ Args:
121
+ token_ids_0 (`List[int]`):
122
+ List of IDs.
123
+ token_ids_1 (`List[int]`, *optional*):
124
+ Optional second list of IDs for sequence pairs.
125
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
126
+ Whether or not the token list is already formatted with special tokens for the model.
127
+
128
+ Returns:
129
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
130
+ """
131
+ if already_has_special_tokens:
132
+ return super().get_special_tokens_mask(
133
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
134
+ )
135
+
136
+ # normal case: some special tokens
137
+ if token_ids_1 is None:
138
+ return [1] + [0] * len(token_ids_0) + [1]
139
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
140
+
141
+ def build_inputs_with_special_tokens(
142
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
143
+ ) -> List[int]:
144
+ """
145
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks. A sequence has the
146
+ following format:
147
+
148
+ - single sequence: `[CLS] X [SEP]`
149
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
150
+
151
+ Args:
152
+ token_ids_0 (`List[int]`):
153
+ List of IDs to which the special tokens will be added.
154
+ token_ids_1 (`List[int]`, *optional*):
155
+ Optional second list of IDs for sequence pairs.
156
+
157
+ Returns:
158
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
159
+ """
160
+ if token_ids_1 is None:
161
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
162
+ else:
163
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
164
+
165
+ def _tokenize(self, text: str) -> List[str]:
166
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
167
+ tokens = [chr(i) for i in text.encode("utf-8")]
168
+ return tokens
169
+
170
+ def _convert_token_to_id(self, token):
171
+ """Converts a token (str) in an id using the vocab."""
172
+ if len(token) != 1:
173
+ token_id = self.unk_token_id
174
+ else:
175
+ token_id = ord(token) + self._num_special_tokens
176
+ return token_id
177
+
178
+ def _convert_id_to_token(self, index):
179
+ """Converts an index (integer) in a token (str) using the vocab."""
180
+ token = chr(index - self._num_special_tokens)
181
+ return token
182
+
183
+ # TODO @ArthurZ refactor this as well....
184
+ def convert_tokens_to_string(self, tokens):
185
+ """Converts a sequence of tokens (string) in a single string."""
186
+ bstring = b""
187
+ for token in tokens:
188
+ if token in self.added_tokens_encoder:
189
+ tok_string = str(token).encode("utf-8")
190
+ else:
191
+ tok_string = bytes([ord(token)])
192
+ bstring += tok_string
193
+ string = bstring.decode("utf-8", errors="replace")
194
+ return string
195
+
196
+ # PerceiverTokenizer has no vocab file
197
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
198
+ return ()
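Because the tokenizer above is purely byte-level, its behaviour can be checked by hand: every UTF-8 byte maps to `byte_value + 6` (ids 0-5 are reserved for the six special tokens), and `build_inputs_with_special_tokens` wraps the sequence in `[CLS]` ... `[SEP]`. A small round-trip sketch based only on the offsets visible in the code above:

```python
from transformers import PerceiverTokenizer

tokenizer = PerceiverTokenizer()  # no vocab file needed; the "vocabulary" is the 256 byte values

text = "héllo"
encoded = tokenizer(text)
# [CLS]=4, then one id per UTF-8 byte (byte value + 6), then [SEP]=5
print(encoded["input_ids"])

# Decoding reassembles the bytes and strips the special tokens on request.
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))  # "héllo"
```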
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__init__.py ADDED
@@ -0,0 +1,93 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_squeezebert": [
22
+ "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "SqueezeBertConfig",
24
+ "SqueezeBertOnnxConfig",
25
+ ],
26
+ "tokenization_squeezebert": ["SqueezeBertTokenizer"],
27
+ }
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
36
+
37
+ try:
38
+ if not is_torch_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["modeling_squeezebert"] = [
44
+ "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
45
+ "SqueezeBertForMaskedLM",
46
+ "SqueezeBertForMultipleChoice",
47
+ "SqueezeBertForQuestionAnswering",
48
+ "SqueezeBertForSequenceClassification",
49
+ "SqueezeBertForTokenClassification",
50
+ "SqueezeBertModel",
51
+ "SqueezeBertModule",
52
+ "SqueezeBertPreTrainedModel",
53
+ ]
54
+
55
+
56
+ if TYPE_CHECKING:
57
+ from .configuration_squeezebert import (
58
+ SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
59
+ SqueezeBertConfig,
60
+ SqueezeBertOnnxConfig,
61
+ )
62
+ from .tokenization_squeezebert import SqueezeBertTokenizer
63
+
64
+ try:
65
+ if not is_tokenizers_available():
66
+ raise OptionalDependencyNotAvailable()
67
+ except OptionalDependencyNotAvailable:
68
+ pass
69
+ else:
70
+ from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
71
+
72
+ try:
73
+ if not is_torch_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .modeling_squeezebert import (
79
+ SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
80
+ SqueezeBertForMaskedLM,
81
+ SqueezeBertForMultipleChoice,
82
+ SqueezeBertForQuestionAnswering,
83
+ SqueezeBertForSequenceClassification,
84
+ SqueezeBertForTokenClassification,
85
+ SqueezeBertModel,
86
+ SqueezeBertModule,
87
+ SqueezeBertPreTrainedModel,
88
+ )
89
+
90
+ else:
91
+ import sys
92
+
93
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
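The `_import_structure` / `_LazyModule` pattern above defers the torch- and tokenizers-dependent imports until an attribute is actually requested, while the `TYPE_CHECKING` branch keeps static type checkers happy. A stripped-down sketch of the same idea; `LazyModule` and the commented usage are hypothetical stand-ins, not the real transformers machinery:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal stand-in for transformers' _LazyModule: resolve submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map attribute name -> submodule that defines it, e.g. "SqueezeBertConfig" -> "configuration_squeezebert"
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the submodule is only imported once
        return value


# Usage sketch (assumes a package with these submodules actually on disk):
# sys.modules[__name__] = LazyModule(__name__, {"configuration_squeezebert": ["SqueezeBertConfig"]})
```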
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/modeling_squeezebert.cpython-310.pyc ADDED
Binary file (32.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/configuration_squeezebert.py ADDED
@@ -0,0 +1,166 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ SqueezeBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class SqueezeBertConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a
33
+ SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a
34
+ configuration with the defaults will yield a similar configuration to that of the SqueezeBERT
35
+ [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+
41
+ Args:
42
+ vocab_size (`int`, *optional*, defaults to 30522):
43
+ Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by
44
+ the `inputs_ids` passed when calling [`SqueezeBertModel`].
45
+ hidden_size (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the encoder layers and the pooler layer.
47
+ num_hidden_layers (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ intermediate_size (`int`, *optional*, defaults to 3072):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
53
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
54
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
55
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
56
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the attention probabilities.
60
+ max_position_embeddings (`int`, *optional*, defaults to 512):
61
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
62
+ just in case (e.g., 512 or 1024 or 2048).
63
+ type_vocab_size (`int`, *optional*, defaults to 2):
64
+ The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
68
+
69
+ pad_token_id (`int`, *optional*, defaults to 0):
70
+ The ID of the token in the word embedding to use as padding.
71
+ embedding_size (`int`, *optional*, defaults to 768):
72
+ The dimension of the word embedding vectors.
73
+
74
+ q_groups (`int`, *optional*, defaults to 4):
75
+ The number of groups in Q layer.
76
+ k_groups (`int`, *optional*, defaults to 4):
77
+ The number of groups in K layer.
78
+ v_groups (`int`, *optional*, defaults to 4):
79
+ The number of groups in V layer.
80
+ post_attention_groups (`int`, *optional*, defaults to 1):
81
+ The number of groups in the first feed forward network layer.
82
+ intermediate_groups (`int`, *optional*, defaults to 4):
83
+ The number of groups in the second feed forward network layer.
84
+ output_groups (`int`, *optional*, defaults to 4):
85
+ The number of groups in the third feed forward network layer.
86
+
87
+ Examples:
88
+
89
+ ```python
90
+ >>> from transformers import SqueezeBertConfig, SqueezeBertModel
91
+
92
+ >>> # Initializing a SqueezeBERT configuration
93
+ >>> configuration = SqueezeBertConfig()
94
+
95
+ >>> # Initializing a model (with random weights) from the configuration above
96
+ >>> model = SqueezeBertModel(configuration)
97
+
98
+ >>> # Accessing the model configuration
99
+ >>> configuration = model.config
100
+ ```
101
+ """
102
+
103
+ model_type = "squeezebert"
104
+
105
+ def __init__(
106
+ self,
107
+ vocab_size=30522,
108
+ hidden_size=768,
109
+ num_hidden_layers=12,
110
+ num_attention_heads=12,
111
+ intermediate_size=3072,
112
+ hidden_act="gelu",
113
+ hidden_dropout_prob=0.1,
114
+ attention_probs_dropout_prob=0.1,
115
+ max_position_embeddings=512,
116
+ type_vocab_size=2,
117
+ initializer_range=0.02,
118
+ layer_norm_eps=1e-12,
119
+ pad_token_id=0,
120
+ embedding_size=768,
121
+ q_groups=4,
122
+ k_groups=4,
123
+ v_groups=4,
124
+ post_attention_groups=1,
125
+ intermediate_groups=4,
126
+ output_groups=4,
127
+ **kwargs,
128
+ ):
129
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
130
+
131
+ self.vocab_size = vocab_size
132
+ self.hidden_size = hidden_size
133
+ self.num_hidden_layers = num_hidden_layers
134
+ self.num_attention_heads = num_attention_heads
135
+ self.hidden_act = hidden_act
136
+ self.intermediate_size = intermediate_size
137
+ self.hidden_dropout_prob = hidden_dropout_prob
138
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
139
+ self.max_position_embeddings = max_position_embeddings
140
+ self.type_vocab_size = type_vocab_size
141
+ self.initializer_range = initializer_range
142
+ self.layer_norm_eps = layer_norm_eps
143
+ self.embedding_size = embedding_size
144
+ self.q_groups = q_groups
145
+ self.k_groups = k_groups
146
+ self.v_groups = v_groups
147
+ self.post_attention_groups = post_attention_groups
148
+ self.intermediate_groups = intermediate_groups
149
+ self.output_groups = output_groups
150
+
151
+
152
+ # # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Bert->SqueezeBert
153
+ class SqueezeBertOnnxConfig(OnnxConfig):
154
+ @property
155
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
156
+ if self.task == "multiple-choice":
157
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
158
+ else:
159
+ dynamic_axis = {0: "batch", 1: "sequence"}
160
+ return OrderedDict(
161
+ [
162
+ ("input_ids", dynamic_axis),
163
+ ("attention_mask", dynamic_axis),
164
+ ("token_type_ids", dynamic_axis),
165
+ ]
166
+ )
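The `inputs` property above is what the ONNX exporter consults to decide which axes of each input tensor stay dynamic. A quick inspection sketch, using only the two classes defined in this file (it builds a default config; no export is performed):

```python
from transformers.models.squeezebert import SqueezeBertConfig, SqueezeBertOnnxConfig

onnx_config = SqueezeBertOnnxConfig(SqueezeBertConfig(), task="default")

# Batch and sequence axes are dynamic, so the exported graph accepts any batch size and length.
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'},
#  'attention_mask': {0: 'batch', 1: 'sequence'},
#  'token_type_ids': {0: 'batch', 1: 'sequence'}}
```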
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/modeling_squeezebert.py ADDED
@@ -0,0 +1,1087 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch SqueezeBert model."""
16
+
17
+
18
+ import math
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ BaseModelOutputWithPooling,
29
+ MaskedLMOutput,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_squeezebert import SqueezeBertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "squeezebert/squeezebert-uncased"
43
+ _CONFIG_FOR_DOC = "SqueezeBertConfig"
44
+
45
+
46
+ from ..deprecated._archive_maps import SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ class SqueezeBertEmbeddings(nn.Module):
50
+ """Construct the embeddings from word, position and token_type embeddings."""
51
+
52
+ def __init__(self, config):
53
+ super().__init__()
54
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
55
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
56
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
57
+
58
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
59
+ # any TensorFlow checkpoint file
60
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
61
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
62
+
63
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
64
+ self.register_buffer(
65
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
66
+ )
67
+
68
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
69
+ if input_ids is not None:
70
+ input_shape = input_ids.size()
71
+ else:
72
+ input_shape = inputs_embeds.size()[:-1]
73
+
74
+ seq_length = input_shape[1]
75
+
76
+ if position_ids is None:
77
+ position_ids = self.position_ids[:, :seq_length]
78
+
79
+ if token_type_ids is None:
80
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
81
+
82
+ if inputs_embeds is None:
83
+ inputs_embeds = self.word_embeddings(input_ids)
84
+ position_embeddings = self.position_embeddings(position_ids)
85
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
86
+
87
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
88
+ embeddings = self.LayerNorm(embeddings)
89
+ embeddings = self.dropout(embeddings)
90
+ return embeddings
91
+
92
+
93
+ class MatMulWrapper(nn.Module):
94
+ """
95
+ Wrapper for torch.matmul(). This makes flop-counting easier to implement. Note that if you directly call
96
+ torch.matmul() in your code, the flop counter will typically ignore the flops of the matmul.
97
+ """
98
+
99
+ def __init__(self):
100
+ super().__init__()
101
+
102
+ def forward(self, mat1, mat2):
103
+ """
104
+
105
+ :param inputs: two torch tensors :return: matmul of these tensors
106
+
107
+ Here are the typical dimensions found in BERT (the B is optional) mat1.shape: [B, <optional extra dims>, M, K]
108
+ mat2.shape: [B, <optional extra dims>, K, N] output shape: [B, <optional extra dims>, M, N]
109
+ """
110
+ return torch.matmul(mat1, mat2)
111
+
112
+
113
+ class SqueezeBertLayerNorm(nn.LayerNorm):
114
+ """
115
+ This is a nn.LayerNorm subclass that accepts NCW data layout and performs normalization in the C dimension.
116
+
117
+ N = batch C = channels W = sequence length
118
+ """
119
+
120
+ def __init__(self, hidden_size, eps=1e-12):
121
+ nn.LayerNorm.__init__(self, normalized_shape=hidden_size, eps=eps) # instantiates self.{weight, bias, eps}
122
+
123
+ def forward(self, x):
124
+ x = x.permute(0, 2, 1)
125
+ x = nn.LayerNorm.forward(self, x)
126
+ return x.permute(0, 2, 1)
127
+
128
+
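`SqueezeBertLayerNorm` normalizes the channel dimension of NCW tensors by permuting to NWC, running a standard `nn.LayerNorm`, and permuting back. A small sketch verifying that this leaves each position's channel vector normalized (all names below are local to the example):

```python
import torch
from torch import nn

batch, hidden_size, seq_len = 2, 8, 5
x = torch.randn(batch, hidden_size, seq_len)  # NCW layout, as used inside the encoder

layer_norm = nn.LayerNorm(hidden_size)

# Same trick as SqueezeBertLayerNorm.forward: NCW -> NWC -> LayerNorm -> NCW
out = layer_norm(x.permute(0, 2, 1)).permute(0, 2, 1)

# Each position's channel vector is now roughly zero-mean with unit variance.
print(out.mean(dim=1).abs().max())            # close to 0
print(out.std(dim=1, unbiased=False).mean())  # close to 1
```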
129
+ class ConvDropoutLayerNorm(nn.Module):
130
+ """
131
+ ConvDropoutLayerNorm: Conv, Dropout, LayerNorm
132
+ """
133
+
134
+ def __init__(self, cin, cout, groups, dropout_prob):
135
+ super().__init__()
136
+
137
+ self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
138
+ self.layernorm = SqueezeBertLayerNorm(cout)
139
+ self.dropout = nn.Dropout(dropout_prob)
140
+
141
+ def forward(self, hidden_states, input_tensor):
142
+ x = self.conv1d(hidden_states)
143
+ x = self.dropout(x)
144
+ x = x + input_tensor
145
+ x = self.layernorm(x)
146
+ return x
147
+
148
+
149
+ class ConvActivation(nn.Module):
150
+ """
151
+ ConvActivation: Conv, Activation
152
+ """
153
+
154
+ def __init__(self, cin, cout, groups, act):
155
+ super().__init__()
156
+ self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
157
+ self.act = ACT2FN[act]
158
+
159
+ def forward(self, x):
160
+ output = self.conv1d(x)
161
+ return self.act(output)
162
+
163
+
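Both `ConvDropoutLayerNorm` and `ConvActivation` exploit the fact that a kernel-size-1 `nn.Conv1d` over `[N, C, W]` data is just a per-position linear projection (grouped, when `groups > 1`). A short equivalence sketch for the ungrouped case:

```python
import torch
from torch import nn

batch, cin, cout, seq_len = 2, 16, 32, 7
x = torch.randn(batch, cin, seq_len)  # NCW layout

conv = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1)
linear = nn.Linear(cin, cout)

# Copy the conv parameters into the linear layer; the conv weight has shape [cout, cin, 1].
with torch.no_grad():
    linear.weight.copy_(conv.weight.squeeze(-1))
    linear.bias.copy_(conv.bias)

out_conv = conv(x)                                        # [N, cout, W]
out_linear = linear(x.permute(0, 2, 1)).permute(0, 2, 1)  # same values via a per-position Linear

print(torch.allclose(out_conv, out_linear, atol=1e-6))    # True
```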
164
+ class SqueezeBertSelfAttention(nn.Module):
165
+ def __init__(self, config, cin, q_groups=1, k_groups=1, v_groups=1):
166
+ """
167
+ config = used for some things; ignored for others (work in progress...) cin = input channels = output channels
168
+ groups = number of groups to use in conv1d layers
169
+ """
170
+ super().__init__()
171
+ if cin % config.num_attention_heads != 0:
172
+ raise ValueError(
173
+ f"cin ({cin}) is not a multiple of the number of attention heads ({config.num_attention_heads})"
174
+ )
175
+ self.num_attention_heads = config.num_attention_heads
176
+ self.attention_head_size = int(cin / config.num_attention_heads)
177
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
178
+
179
+ self.query = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=q_groups)
180
+ self.key = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=k_groups)
181
+ self.value = nn.Conv1d(in_channels=cin, out_channels=cin, kernel_size=1, groups=v_groups)
182
+
183
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
184
+ self.softmax = nn.Softmax(dim=-1)
185
+
186
+ self.matmul_qk = MatMulWrapper()
187
+ self.matmul_qkv = MatMulWrapper()
188
+
189
+ def transpose_for_scores(self, x):
190
+ """
191
+ - input: [N, C, W]
192
+ - output: [N, C1, W, C2] where C1 is the head index, and C2 is one head's contents
193
+ """
194
+ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
195
+ x = x.view(*new_x_shape)
196
+ return x.permute(0, 1, 3, 2) # [N, C1, C2, W] --> [N, C1, W, C2]
197
+
198
+ def transpose_key_for_scores(self, x):
199
+ """
200
+ - input: [N, C, W]
201
+ - output: [N, C1, C2, W] where C1 is the head index, and C2 is one head's contents
202
+ """
203
+ new_x_shape = (x.size()[0], self.num_attention_heads, self.attention_head_size, x.size()[-1]) # [N, C1, C2, W]
204
+ x = x.view(*new_x_shape)
205
+ # no `permute` needed
206
+ return x
207
+
208
+ def transpose_output(self, x):
209
+ """
210
+ - input: [N, C1, W, C2]
211
+ - output: [N, C, W]
212
+ """
213
+ x = x.permute(0, 1, 3, 2).contiguous() # [N, C1, C2, W]
214
+ new_x_shape = (x.size()[0], self.all_head_size, x.size()[3]) # [N, C, W]
215
+ x = x.view(*new_x_shape)
216
+ return x
217
+
218
+ def forward(self, hidden_states, attention_mask, output_attentions):
219
+ """
220
+ expects hidden_states in [N, C, W] data layout.
221
+
222
+ The attention_mask data layout is [N, W], and it does not need to be transposed.
223
+ """
224
+ mixed_query_layer = self.query(hidden_states)
225
+ mixed_key_layer = self.key(hidden_states)
226
+ mixed_value_layer = self.value(hidden_states)
227
+
228
+ query_layer = self.transpose_for_scores(mixed_query_layer)
229
+ key_layer = self.transpose_key_for_scores(mixed_key_layer)
230
+ value_layer = self.transpose_for_scores(mixed_value_layer)
231
+
232
+ # Take the dot product between "query" and "key" to get the raw attention scores.
233
+ attention_score = self.matmul_qk(query_layer, key_layer)
234
+ attention_score = attention_score / math.sqrt(self.attention_head_size)
235
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
236
+ attention_score = attention_score + attention_mask
237
+
238
+ # Normalize the attention scores to probabilities.
239
+ attention_probs = self.softmax(attention_score)
240
+
241
+ # This is actually dropping out entire tokens to attend to, which might
242
+ # seem a bit unusual, but is taken from the original Transformer paper.
243
+ attention_probs = self.dropout(attention_probs)
244
+
245
+ context_layer = self.matmul_qkv(attention_probs, value_layer)
246
+ context_layer = self.transpose_output(context_layer)
247
+
248
+ result = {"context_layer": context_layer}
249
+ if output_attentions:
250
+ result["attention_score"] = attention_score
251
+ return result
252
+
253
+
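The self-attention block above stays in `[N, C, W]` layout end to end: grouped 1x1 convolutions produce Q/K/V, `transpose_for_scores` / `transpose_key_for_scores` split out the heads, and the context is folded back with `transpose_output`. A shape-only sketch with a tiny, randomly initialized module (the configuration values are arbitrary):

```python
import torch
from transformers import SqueezeBertConfig
from transformers.models.squeezebert.modeling_squeezebert import SqueezeBertSelfAttention

config = SqueezeBertConfig(hidden_size=64, num_attention_heads=4)
attn = SqueezeBertSelfAttention(config, cin=config.hidden_size, q_groups=4, k_groups=4, v_groups=4)

batch, seq_len = 2, 10
hidden_states = torch.randn(batch, config.hidden_size, seq_len)  # [N, C, W]
attention_mask = torch.zeros(batch, 1, 1, seq_len)               # additive mask; 0 means "attend"

out = attn(hidden_states, attention_mask, output_attentions=True)
print(out["context_layer"].shape)    # torch.Size([2, 64, 10])
print(out["attention_score"].shape)  # torch.Size([2, 4, 10, 10])
```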
254
+ class SqueezeBertModule(nn.Module):
255
+ def __init__(self, config):
256
+ """
257
+ - hidden_size = input chans = output chans for Q, K, V (they are all the same ... for now) = output chans for
258
+ the module
259
+ - intermediate_size = output chans for intermediate layer
260
+ - groups = number of groups for all layers in the BertModule. (eventually we could change the interface to
261
+ allow different groups for different layers)
262
+ """
263
+ super().__init__()
264
+
265
+ c0 = config.hidden_size
266
+ c1 = config.hidden_size
267
+ c2 = config.intermediate_size
268
+ c3 = config.hidden_size
269
+
270
+ self.attention = SqueezeBertSelfAttention(
271
+ config=config, cin=c0, q_groups=config.q_groups, k_groups=config.k_groups, v_groups=config.v_groups
272
+ )
273
+ self.post_attention = ConvDropoutLayerNorm(
274
+ cin=c0, cout=c1, groups=config.post_attention_groups, dropout_prob=config.hidden_dropout_prob
275
+ )
276
+ self.intermediate = ConvActivation(cin=c1, cout=c2, groups=config.intermediate_groups, act=config.hidden_act)
277
+ self.output = ConvDropoutLayerNorm(
278
+ cin=c2, cout=c3, groups=config.output_groups, dropout_prob=config.hidden_dropout_prob
279
+ )
280
+
281
+ def forward(self, hidden_states, attention_mask, output_attentions):
282
+ att = self.attention(hidden_states, attention_mask, output_attentions)
283
+ attention_output = att["context_layer"]
284
+
285
+ post_attention_output = self.post_attention(attention_output, hidden_states)
286
+ intermediate_output = self.intermediate(post_attention_output)
287
+ layer_output = self.output(intermediate_output, post_attention_output)
288
+
289
+ output_dict = {"feature_map": layer_output}
290
+ if output_attentions:
291
+ output_dict["attention_score"] = att["attention_score"]
292
+
293
+ return output_dict
294
+
295
+
296
+ class SqueezeBertEncoder(nn.Module):
297
+ def __init__(self, config):
298
+ super().__init__()
299
+
300
+ assert config.embedding_size == config.hidden_size, (
301
+ "If you want embedding_size != intermediate hidden_size, "
302
+ "please insert a Conv1d layer to adjust the number of channels "
303
+ "before the first SqueezeBertModule."
304
+ )
305
+
306
+ self.layers = nn.ModuleList(SqueezeBertModule(config) for _ in range(config.num_hidden_layers))
307
+
308
+ def forward(
309
+ self,
310
+ hidden_states,
311
+ attention_mask=None,
312
+ head_mask=None,
313
+ output_attentions=False,
314
+ output_hidden_states=False,
315
+ return_dict=True,
316
+ ):
317
+ if head_mask is None:
318
+ head_mask_is_all_none = True
319
+ elif head_mask.count(None) == len(head_mask):
320
+ head_mask_is_all_none = True
321
+ else:
322
+ head_mask_is_all_none = False
323
+ assert head_mask_is_all_none is True, "head_mask is not yet supported in the SqueezeBert implementation."
324
+
325
+ # [batch_size, sequence_length, hidden_size] --> [batch_size, hidden_size, sequence_length]
326
+ hidden_states = hidden_states.permute(0, 2, 1)
327
+
328
+ all_hidden_states = () if output_hidden_states else None
329
+ all_attentions = () if output_attentions else None
330
+
331
+ for layer in self.layers:
332
+ if output_hidden_states:
333
+ hidden_states = hidden_states.permute(0, 2, 1)
334
+ all_hidden_states += (hidden_states,)
335
+ hidden_states = hidden_states.permute(0, 2, 1)
336
+
337
+ layer_output = layer.forward(hidden_states, attention_mask, output_attentions)
338
+
339
+ hidden_states = layer_output["feature_map"]
340
+
341
+ if output_attentions:
342
+ all_attentions += (layer_output["attention_score"],)
343
+
344
+ # [batch_size, hidden_size, sequence_length] --> [batch_size, sequence_length, hidden_size]
345
+ hidden_states = hidden_states.permute(0, 2, 1)
346
+
347
+ if output_hidden_states:
348
+ all_hidden_states += (hidden_states,)
349
+
350
+ if not return_dict:
351
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
352
+ return BaseModelOutput(
353
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
354
+ )
355
+
356
+
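`SqueezeBertEncoder` flips between `[batch, seq, hidden]` and `[batch, hidden, seq]` around the layer stack, and re-permutes any intermediate hidden states before reporting them, so everything a caller sees is in the usual sequence-first layout. A sketch with a tiny randomly initialized model (sizes chosen only to keep the example light):

```python
import torch
from transformers import SqueezeBertConfig, SqueezeBertModel

config = SqueezeBertConfig(
    vocab_size=100, hidden_size=64, embedding_size=64,
    intermediate_size=128, num_hidden_layers=2, num_attention_heads=4,
)
model = SqueezeBertModel(config)

input_ids = torch.randint(1, config.vocab_size, (2, 12))
outputs = model(input_ids, output_hidden_states=True)

# Internally the encoder works channels-first, but every returned tensor is [batch, seq, hidden].
print(outputs.last_hidden_state.shape)  # torch.Size([2, 12, 64])
print(outputs.hidden_states[0].shape)   # torch.Size([2, 12, 64]) -- embeddings, already re-permuted
```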
357
+ class SqueezeBertPooler(nn.Module):
358
+ def __init__(self, config):
359
+ super().__init__()
360
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
361
+ self.activation = nn.Tanh()
362
+
363
+ def forward(self, hidden_states):
364
+ # We "pool" the model by simply taking the hidden state corresponding
365
+ # to the first token.
366
+ first_token_tensor = hidden_states[:, 0]
367
+ pooled_output = self.dense(first_token_tensor)
368
+ pooled_output = self.activation(pooled_output)
369
+ return pooled_output
370
+
371
+
372
+ class SqueezeBertPredictionHeadTransform(nn.Module):
373
+ def __init__(self, config):
374
+ super().__init__()
375
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
376
+ if isinstance(config.hidden_act, str):
377
+ self.transform_act_fn = ACT2FN[config.hidden_act]
378
+ else:
379
+ self.transform_act_fn = config.hidden_act
380
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
381
+
382
+ def forward(self, hidden_states):
383
+ hidden_states = self.dense(hidden_states)
384
+ hidden_states = self.transform_act_fn(hidden_states)
385
+ hidden_states = self.LayerNorm(hidden_states)
386
+ return hidden_states
387
+
388
+
389
+ class SqueezeBertLMPredictionHead(nn.Module):
390
+ def __init__(self, config):
391
+ super().__init__()
392
+ self.transform = SqueezeBertPredictionHeadTransform(config)
393
+
394
+ # The output weights are the same as the input embeddings, but there is
395
+ # an output-only bias for each token.
396
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
397
+
398
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
399
+
400
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
401
+ self.decoder.bias = self.bias
402
+
403
+ def forward(self, hidden_states):
404
+ hidden_states = self.transform(hidden_states)
405
+ hidden_states = self.decoder(hidden_states)
406
+ return hidden_states
407
+
408
+
409
+ class SqueezeBertOnlyMLMHead(nn.Module):
410
+ def __init__(self, config):
411
+ super().__init__()
412
+ self.predictions = SqueezeBertLMPredictionHead(config)
413
+
414
+ def forward(self, sequence_output):
415
+ prediction_scores = self.predictions(sequence_output)
416
+ return prediction_scores
417
+
418
+
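`SqueezeBertLMPredictionHead` assigns `self.decoder.bias = self.bias` so the bias is resized together with the vocabulary, and the decoder weight itself is tied to the input embedding matrix via `_tied_weights_keys` / `post_init()` on the masked-LM model defined below. A quick sanity check of both ties on a randomly initialized model (a sketch; it assumes the default `tie_word_embeddings=True`):

```python
from transformers import SqueezeBertConfig, SqueezeBertForMaskedLM

config = SqueezeBertConfig(
    vocab_size=100, hidden_size=64, embedding_size=64,
    intermediate_size=128, num_hidden_layers=2, num_attention_heads=4,
)
model = SqueezeBertForMaskedLM(config)

# The output projection shares its storage with the input embedding matrix after post_init().
print(
    model.cls.predictions.decoder.weight.data_ptr()
    == model.transformer.embeddings.word_embeddings.weight.data_ptr()
)  # True

# The decoder bias and the head's own bias are the very same Parameter object.
print(model.cls.predictions.decoder.bias is model.cls.predictions.bias)  # True
```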
419
+ class SqueezeBertPreTrainedModel(PreTrainedModel):
420
+ """
421
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
422
+ models.
423
+ """
424
+
425
+ config_class = SqueezeBertConfig
426
+ base_model_prefix = "transformer"
427
+
428
+ def _init_weights(self, module):
429
+ """Initialize the weights"""
430
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
431
+ # Slightly different from the TF version which uses truncated_normal for initialization
432
+ # cf https://github.com/pytorch/pytorch/pull/5617
433
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
434
+ if module.bias is not None:
435
+ module.bias.data.zero_()
436
+ elif isinstance(module, nn.Embedding):
437
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
438
+ if module.padding_idx is not None:
439
+ module.weight.data[module.padding_idx].zero_()
440
+ elif isinstance(module, SqueezeBertLayerNorm):
441
+ module.bias.data.zero_()
442
+ module.weight.data.fill_(1.0)
443
+
444
+
445
+ SQUEEZEBERT_START_DOCSTRING = r"""
446
+
447
+ The SqueezeBERT model was proposed in [SqueezeBERT: What can computer vision teach NLP about efficient neural
448
+ networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W.
449
+ Keutzer
450
+
451
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
452
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
453
+ etc.)
454
+
455
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
456
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
457
+ and behavior.
458
+
459
+ For best results finetuning SqueezeBERT on text classification tasks, it is recommended to use the
460
+ *squeezebert/squeezebert-mnli-headless* checkpoint as a starting point.
461
+
462
+ Parameters:
463
+ config ([`SqueezeBertConfig`]): Model configuration class with all the parameters of the model.
464
+ Initializing with a config file does not load the weights associated with the model, only the
465
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
466
+
467
+ Hierarchy:
468
+
469
+ ```
470
+ Internal class hierarchy:
471
+ SqueezeBertModel
472
+ SqueezeBertEncoder
473
+ SqueezeBertModule
474
+ SqueezeBertSelfAttention
475
+ ConvActivation
476
+ ConvDropoutLayerNorm
477
+ ```
478
+
479
+ Data layouts:
480
+
481
+ ```
482
+ Input data is in [batch, sequence_length, hidden_size] format.
483
+
484
+ Data inside the encoder is in [batch, hidden_size, sequence_length] format. But, if `output_hidden_states == True`, the data from inside the encoder is returned in [batch, sequence_length, hidden_size] format.
485
+
486
+ The final output of the encoder is in [batch, sequence_length, hidden_size] format.
487
+ ```
488
+ """
489
+
490
+ SQUEEZEBERT_INPUTS_DOCSTRING = r"""
491
+ Args:
492
+ input_ids (`torch.LongTensor` of shape `({0})`):
493
+ Indices of input sequence tokens in the vocabulary.
494
+
495
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
496
+ [`PreTrainedTokenizer.__call__`] for details.
497
+
498
+ [What are input IDs?](../glossary#input-ids)
499
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
500
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
501
+
502
+ - 1 for tokens that are **not masked**,
503
+ - 0 for tokens that are **masked**.
504
+
505
+ [What are attention masks?](../glossary#attention-mask)
506
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
507
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
508
+ 1]`:
509
+
510
+ - 0 corresponds to a *sentence A* token,
511
+ - 1 corresponds to a *sentence B* token.
512
+
513
+ [What are token type IDs?](../glossary#token-type-ids)
514
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
515
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
516
+ config.max_position_embeddings - 1]`.
517
+
518
+ [What are position IDs?](../glossary#position-ids)
519
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
520
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
521
+
522
+ - 1 indicates the head is **not masked**,
523
+ - 0 indicates the head is **masked**.
524
+
525
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
526
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
527
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
528
+ model's internal embedding lookup matrix.
529
+ output_attentions (`bool`, *optional*):
530
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
531
+ tensors for more detail.
532
+ output_hidden_states (`bool`, *optional*):
533
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
534
+ more detail.
535
+ return_dict (`bool`, *optional*):
536
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
537
+ """
538
+
539
+
540
+ @add_start_docstrings(
541
+ "The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.",
542
+ SQUEEZEBERT_START_DOCSTRING,
543
+ )
544
+ class SqueezeBertModel(SqueezeBertPreTrainedModel):
545
+ def __init__(self, config):
546
+ super().__init__(config)
547
+
548
+ self.embeddings = SqueezeBertEmbeddings(config)
549
+ self.encoder = SqueezeBertEncoder(config)
550
+ self.pooler = SqueezeBertPooler(config)
551
+
552
+ # Initialize weights and apply final processing
553
+ self.post_init()
554
+
555
+ def get_input_embeddings(self):
556
+ return self.embeddings.word_embeddings
557
+
558
+ def set_input_embeddings(self, new_embeddings):
559
+ self.embeddings.word_embeddings = new_embeddings
560
+
561
+ def _prune_heads(self, heads_to_prune):
562
+ """
563
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
564
+ class PreTrainedModel
565
+ """
566
+ for layer, heads in heads_to_prune.items():
567
+ self.encoder.layer[layer].attention.prune_heads(heads)
568
+
569
+ @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
570
+ @add_code_sample_docstrings(
571
+ checkpoint=_CHECKPOINT_FOR_DOC,
572
+ output_type=BaseModelOutputWithPooling,
573
+ config_class=_CONFIG_FOR_DOC,
574
+ )
575
+ def forward(
576
+ self,
577
+ input_ids: Optional[torch.Tensor] = None,
578
+ attention_mask: Optional[torch.Tensor] = None,
579
+ token_type_ids: Optional[torch.Tensor] = None,
580
+ position_ids: Optional[torch.Tensor] = None,
581
+ head_mask: Optional[torch.Tensor] = None,
582
+ inputs_embeds: Optional[torch.FloatTensor] = None,
583
+ output_attentions: Optional[bool] = None,
584
+ output_hidden_states: Optional[bool] = None,
585
+ return_dict: Optional[bool] = None,
586
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
587
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
588
+ output_hidden_states = (
589
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
590
+ )
591
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
592
+
593
+ if input_ids is not None and inputs_embeds is not None:
594
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
595
+ elif input_ids is not None:
596
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
597
+ input_shape = input_ids.size()
598
+ elif inputs_embeds is not None:
599
+ input_shape = inputs_embeds.size()[:-1]
600
+ else:
601
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
602
+
603
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
604
+
605
+ if attention_mask is None:
606
+ attention_mask = torch.ones(input_shape, device=device)
607
+ if token_type_ids is None:
608
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
609
+
610
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
611
+ # Prepare head mask if needed
612
+ # 1.0 in head_mask indicate we keep the head
613
+ # attention_probs has shape bsz x n_heads x N x N
614
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
615
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
616
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
617
+
618
+ embedding_output = self.embeddings(
619
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
620
+ )
621
+ encoder_outputs = self.encoder(
622
+ hidden_states=embedding_output,
623
+ attention_mask=extended_attention_mask,
624
+ head_mask=head_mask,
625
+ output_attentions=output_attentions,
626
+ output_hidden_states=output_hidden_states,
627
+ return_dict=return_dict,
628
+ )
629
+ sequence_output = encoder_outputs[0]
630
+ pooled_output = self.pooler(sequence_output)
631
+
632
+ if not return_dict:
633
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
634
+
635
+ return BaseModelOutputWithPooling(
636
+ last_hidden_state=sequence_output,
637
+ pooler_output=pooled_output,
638
+ hidden_states=encoder_outputs.hidden_states,
639
+ attentions=encoder_outputs.attentions,
640
+ )
641
+
642
+
643
+ @add_start_docstrings("""SqueezeBERT Model with a `language modeling` head on top.""", SQUEEZEBERT_START_DOCSTRING)
644
+ class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel):
645
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
646
+
647
+ def __init__(self, config):
648
+ super().__init__(config)
649
+
650
+ self.transformer = SqueezeBertModel(config)
651
+ self.cls = SqueezeBertOnlyMLMHead(config)
652
+
653
+ # Initialize weights and apply final processing
654
+ self.post_init()
655
+
656
+ def get_output_embeddings(self):
657
+ return self.cls.predictions.decoder
658
+
659
+ def set_output_embeddings(self, new_embeddings):
660
+ self.cls.predictions.decoder = new_embeddings
661
+
662
+ @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
663
+ @add_code_sample_docstrings(
664
+ checkpoint=_CHECKPOINT_FOR_DOC,
665
+ output_type=MaskedLMOutput,
666
+ config_class=_CONFIG_FOR_DOC,
667
+ )
668
+ def forward(
669
+ self,
670
+ input_ids: Optional[torch.Tensor] = None,
671
+ attention_mask: Optional[torch.Tensor] = None,
672
+ token_type_ids: Optional[torch.Tensor] = None,
673
+ position_ids: Optional[torch.Tensor] = None,
674
+ head_mask: Optional[torch.Tensor] = None,
675
+ inputs_embeds: Optional[torch.Tensor] = None,
676
+ labels: Optional[torch.Tensor] = None,
677
+ output_attentions: Optional[bool] = None,
678
+ output_hidden_states: Optional[bool] = None,
679
+ return_dict: Optional[bool] = None,
680
+ ) -> Union[Tuple, MaskedLMOutput]:
681
+ r"""
682
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
683
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
684
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
685
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
686
+ """
687
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
688
+
689
+ outputs = self.transformer(
690
+ input_ids,
691
+ attention_mask=attention_mask,
692
+ token_type_ids=token_type_ids,
693
+ position_ids=position_ids,
694
+ head_mask=head_mask,
695
+ inputs_embeds=inputs_embeds,
696
+ output_attentions=output_attentions,
697
+ output_hidden_states=output_hidden_states,
698
+ return_dict=return_dict,
699
+ )
700
+
701
+ sequence_output = outputs[0]
702
+ prediction_scores = self.cls(sequence_output)
703
+
704
+ masked_lm_loss = None
705
+ if labels is not None:
706
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
707
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
708
+
709
+ if not return_dict:
710
+ output = (prediction_scores,) + outputs[2:]
711
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
712
+
713
+ return MaskedLMOutput(
714
+ loss=masked_lm_loss,
715
+ logits=prediction_scores,
716
+ hidden_states=outputs.hidden_states,
717
+ attentions=outputs.attentions,
718
+ )
719
+
720
+
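Put together, the masked-LM model above is used like any other fill-mask model in the library. A usage sketch against the public `squeezebert/squeezebert-uncased` checkpoint (weights are downloaded on first run; the predicted token is indicative, not guaranteed):

```python
import torch
from transformers import AutoTokenizer, SqueezeBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
model = SqueezeBertForMaskedLM.from_pretrained("squeezebert/squeezebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring token at the [MASK] position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))  # a plausible completion such as "paris"
```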
721
+ @add_start_docstrings(
722
+ """
723
+ SqueezeBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
724
+ pooled output) e.g. for GLUE tasks.
725
+ """,
726
+ SQUEEZEBERT_START_DOCSTRING,
727
+ )
728
+ class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel):
729
+ def __init__(self, config):
730
+ super().__init__(config)
731
+ self.num_labels = config.num_labels
732
+ self.config = config
733
+
734
+ self.transformer = SqueezeBertModel(config)
735
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
736
+ self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
737
+
738
+ # Initialize weights and apply final processing
739
+ self.post_init()
740
+
741
+ @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
742
+ @add_code_sample_docstrings(
743
+ checkpoint=_CHECKPOINT_FOR_DOC,
744
+ output_type=SequenceClassifierOutput,
745
+ config_class=_CONFIG_FOR_DOC,
746
+ )
747
+ def forward(
748
+ self,
749
+ input_ids: Optional[torch.Tensor] = None,
750
+ attention_mask: Optional[torch.Tensor] = None,
751
+ token_type_ids: Optional[torch.Tensor] = None,
752
+ position_ids: Optional[torch.Tensor] = None,
753
+ head_mask: Optional[torch.Tensor] = None,
754
+ inputs_embeds: Optional[torch.Tensor] = None,
755
+ labels: Optional[torch.Tensor] = None,
756
+ output_attentions: Optional[bool] = None,
757
+ output_hidden_states: Optional[bool] = None,
758
+ return_dict: Optional[bool] = None,
759
+ ) -> Union[Tuple, SequenceClassifierOutput]:
760
+ r"""
761
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
762
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
763
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
764
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
765
+ """
766
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
767
+
768
+ outputs = self.transformer(
769
+ input_ids,
770
+ attention_mask=attention_mask,
771
+ token_type_ids=token_type_ids,
772
+ position_ids=position_ids,
773
+ head_mask=head_mask,
774
+ inputs_embeds=inputs_embeds,
775
+ output_attentions=output_attentions,
776
+ output_hidden_states=output_hidden_states,
777
+ return_dict=return_dict,
778
+ )
779
+
780
+ pooled_output = outputs[1]
781
+
782
+ pooled_output = self.dropout(pooled_output)
783
+ logits = self.classifier(pooled_output)
784
+
785
+ loss = None
786
+ if labels is not None:
787
+ if self.config.problem_type is None:
788
+ if self.num_labels == 1:
789
+ self.config.problem_type = "regression"
790
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
791
+ self.config.problem_type = "single_label_classification"
792
+ else:
793
+ self.config.problem_type = "multi_label_classification"
794
+
795
+ if self.config.problem_type == "regression":
796
+ loss_fct = MSELoss()
797
+ if self.num_labels == 1:
798
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
799
+ else:
800
+ loss = loss_fct(logits, labels)
801
+ elif self.config.problem_type == "single_label_classification":
802
+ loss_fct = CrossEntropyLoss()
803
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
804
+ elif self.config.problem_type == "multi_label_classification":
805
+ loss_fct = BCEWithLogitsLoss()
806
+ loss = loss_fct(logits, labels)
807
+
808
+ if not return_dict:
809
+ output = (logits,) + outputs[2:]
810
+ return ((loss,) + output) if loss is not None else output
811
+
812
+ return SequenceClassifierOutput(
813
+ loss=loss,
814
+ logits=logits,
815
+ hidden_states=outputs.hidden_states,
816
+ attentions=outputs.attentions,
817
+ )
818
+
819
+
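The `problem_type` branch above selects MSE, cross-entropy, or BCE-with-logits from `num_labels` and the label dtype. A condensed, standalone sketch of just that decision logic (illustrative only; the model infers and caches `problem_type` on the config instead of recomputing it):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss


def classification_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
    """Mirrors the problem_type inference used in the forward pass above."""
    if num_labels == 1:
        # Regression: one continuous target per example.
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        # Single-label classification: integer class ids.
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    # Multi-label classification: independent 0/1 targets per class.
    return BCEWithLogitsLoss()(logits, labels.float())


logits = torch.randn(4, 3)
print(classification_loss(logits, torch.tensor([0, 2, 1, 1]), num_labels=3))           # cross-entropy
print(classification_loss(logits, torch.randint(0, 2, (4, 3)).float(), num_labels=3))  # BCE-with-logits
```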
820
+ @add_start_docstrings(
821
+ """
822
+ SqueezeBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
823
+ a softmax) e.g. for RocStories/SWAG tasks.
824
+ """,
825
+ SQUEEZEBERT_START_DOCSTRING,
826
+ )
827
+ class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel):
828
+ def __init__(self, config):
829
+ super().__init__(config)
830
+
831
+ self.transformer = SqueezeBertModel(config)
832
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
833
+ self.classifier = nn.Linear(config.hidden_size, 1)
834
+
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ @add_start_docstrings_to_model_forward(
839
+ SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
840
+ )
841
+ @add_code_sample_docstrings(
842
+ checkpoint=_CHECKPOINT_FOR_DOC,
843
+ output_type=MultipleChoiceModelOutput,
844
+ config_class=_CONFIG_FOR_DOC,
845
+ )
846
+ def forward(
847
+ self,
848
+ input_ids: Optional[torch.Tensor] = None,
849
+ attention_mask: Optional[torch.Tensor] = None,
850
+ token_type_ids: Optional[torch.Tensor] = None,
851
+ position_ids: Optional[torch.Tensor] = None,
852
+ head_mask: Optional[torch.Tensor] = None,
853
+ inputs_embeds: Optional[torch.Tensor] = None,
854
+ labels: Optional[torch.Tensor] = None,
855
+ output_attentions: Optional[bool] = None,
856
+ output_hidden_states: Optional[bool] = None,
857
+ return_dict: Optional[bool] = None,
858
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
859
+ r"""
860
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
861
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
862
+ num_choices-1]` where *num_choices* is the size of the second dimension of the input tensors. (see
863
+ *input_ids* above)
864
+ """
865
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
866
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
867
+
868
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
869
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
870
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
871
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
872
+ inputs_embeds = (
873
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
874
+ if inputs_embeds is not None
875
+ else None
876
+ )
877
+
878
+ outputs = self.transformer(
879
+ input_ids,
880
+ attention_mask=attention_mask,
881
+ token_type_ids=token_type_ids,
882
+ position_ids=position_ids,
883
+ head_mask=head_mask,
884
+ inputs_embeds=inputs_embeds,
885
+ output_attentions=output_attentions,
886
+ output_hidden_states=output_hidden_states,
887
+ return_dict=return_dict,
888
+ )
889
+
890
+ pooled_output = outputs[1]
891
+
892
+ pooled_output = self.dropout(pooled_output)
893
+ logits = self.classifier(pooled_output)
894
+ reshaped_logits = logits.view(-1, num_choices)
895
+
896
+ loss = None
897
+ if labels is not None:
898
+ loss_fct = CrossEntropyLoss()
899
+ loss = loss_fct(reshaped_logits, labels)
900
+
901
+ if not return_dict:
902
+ output = (reshaped_logits,) + outputs[2:]
903
+ return ((loss,) + output) if loss is not None else output
904
+
905
+ return MultipleChoiceModelOutput(
906
+ loss=loss,
907
+ logits=reshaped_logits,
908
+ hidden_states=outputs.hidden_states,
909
+ attentions=outputs.attentions,
910
+ )
911
+
912
+
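For reference, a small sketch (toy tensors, not from this file) of the reshaping `SqueezeBertForMultipleChoice.forward` performs: the `num_choices` dimension is folded into the batch before the encoder, and the per-choice logits are folded back into `(batch_size, num_choices)` for the cross-entropy loss:

```python
import torch
from torch.nn import CrossEntropyLoss, Linear

batch_size, num_choices, seq_len, hidden = 2, 4, 8, 16

# (batch, num_choices, seq_len) -> (batch * num_choices, seq_len) before the encoder
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
flat_input_ids = input_ids.view(-1, input_ids.size(-1))      # (8, 8)

# stand-ins for the pooled encoder output and the 1-unit classifier head
pooled_output = torch.randn(flat_input_ids.size(0), hidden)
logits = Linear(hidden, 1)(pooled_output)                     # (8, 1)

# one score per choice, then cross-entropy against the index of the correct choice
reshaped_logits = logits.view(-1, num_choices)                # (2, 4)
labels = torch.tensor([1, 3])
loss = CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss.item())
```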
913
+ @add_start_docstrings(
914
+ """
915
+ SqueezeBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
916
+ for Named-Entity-Recognition (NER) tasks.
917
+ """,
918
+ SQUEEZEBERT_START_DOCSTRING,
919
+ )
920
+ class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel):
921
+ def __init__(self, config):
922
+ super().__init__(config)
923
+ self.num_labels = config.num_labels
924
+
925
+ self.transformer = SqueezeBertModel(config)
926
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
927
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
928
+
929
+ # Initialize weights and apply final processing
930
+ self.post_init()
931
+
932
+ @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
933
+ @add_code_sample_docstrings(
934
+ checkpoint=_CHECKPOINT_FOR_DOC,
935
+ output_type=TokenClassifierOutput,
936
+ config_class=_CONFIG_FOR_DOC,
937
+ )
938
+ def forward(
939
+ self,
940
+ input_ids: Optional[torch.Tensor] = None,
941
+ attention_mask: Optional[torch.Tensor] = None,
942
+ token_type_ids: Optional[torch.Tensor] = None,
943
+ position_ids: Optional[torch.Tensor] = None,
944
+ head_mask: Optional[torch.Tensor] = None,
945
+ inputs_embeds: Optional[torch.Tensor] = None,
946
+ labels: Optional[torch.Tensor] = None,
947
+ output_attentions: Optional[bool] = None,
948
+ output_hidden_states: Optional[bool] = None,
949
+ return_dict: Optional[bool] = None,
950
+ ) -> Union[Tuple, TokenClassifierOutput]:
951
+ r"""
952
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
953
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
954
+ """
955
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
956
+
957
+ outputs = self.transformer(
958
+ input_ids,
959
+ attention_mask=attention_mask,
960
+ token_type_ids=token_type_ids,
961
+ position_ids=position_ids,
962
+ head_mask=head_mask,
963
+ inputs_embeds=inputs_embeds,
964
+ output_attentions=output_attentions,
965
+ output_hidden_states=output_hidden_states,
966
+ return_dict=return_dict,
967
+ )
968
+
969
+ sequence_output = outputs[0]
970
+
971
+ sequence_output = self.dropout(sequence_output)
972
+ logits = self.classifier(sequence_output)
973
+
974
+ loss = None
975
+ if labels is not None:
976
+ loss_fct = CrossEntropyLoss()
977
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
978
+
979
+ if not return_dict:
980
+ output = (logits,) + outputs[2:]
981
+ return ((loss,) + output) if loss is not None else output
982
+
983
+ return TokenClassifierOutput(
984
+ loss=loss,
985
+ logits=logits,
986
+ hidden_states=outputs.hidden_states,
987
+ attentions=outputs.attentions,
988
+ )
989
+
990
+
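A hedged usage sketch for the token classification head; the checkpoint name and `num_labels` are illustrative assumptions, and the classification head loaded this way is randomly initialized rather than fine-tuned:

```python
import torch
from transformers import AutoTokenizer, SqueezeBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
model = SqueezeBertForTokenClassification.from_pretrained(
    "squeezebert/squeezebert-uncased", num_labels=5  # head is untrained here
)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # (1, seq_len, num_labels)

predicted_ids = logits.argmax(dim=-1)      # one label id per token
print(predicted_ids.shape)
```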
991
+ @add_start_docstrings(
992
+ """
993
+ SqueezeBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
994
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
995
+ """,
996
+ SQUEEZEBERT_START_DOCSTRING,
997
+ )
998
+ class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel):
999
+ def __init__(self, config):
1000
+ super().__init__(config)
1001
+ self.num_labels = config.num_labels
1002
+
1003
+ self.transformer = SqueezeBertModel(config)
1004
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1005
+
1006
+ # Initialize weights and apply final processing
1007
+ self.post_init()
1008
+
1009
+ @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1010
+ @add_code_sample_docstrings(
1011
+ checkpoint=_CHECKPOINT_FOR_DOC,
1012
+ output_type=QuestionAnsweringModelOutput,
1013
+ config_class=_CONFIG_FOR_DOC,
1014
+ )
1015
+ def forward(
1016
+ self,
1017
+ input_ids: Optional[torch.Tensor] = None,
1018
+ attention_mask: Optional[torch.Tensor] = None,
1019
+ token_type_ids: Optional[torch.Tensor] = None,
1020
+ position_ids: Optional[torch.Tensor] = None,
1021
+ head_mask: Optional[torch.Tensor] = None,
1022
+ inputs_embeds: Optional[torch.Tensor] = None,
1023
+ start_positions: Optional[torch.Tensor] = None,
1024
+ end_positions: Optional[torch.Tensor] = None,
1025
+ output_attentions: Optional[bool] = None,
1026
+ output_hidden_states: Optional[bool] = None,
1027
+ return_dict: Optional[bool] = None,
1028
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1029
+ r"""
1030
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1031
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1032
+ Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
1033
+ are not taken into account for computing the loss.
1034
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1035
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1036
+ Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
1037
+ are not taken into account for computing the loss.
1038
+ """
1039
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1040
+
1041
+ outputs = self.transformer(
1042
+ input_ids,
1043
+ attention_mask=attention_mask,
1044
+ token_type_ids=token_type_ids,
1045
+ position_ids=position_ids,
1046
+ head_mask=head_mask,
1047
+ inputs_embeds=inputs_embeds,
1048
+ output_attentions=output_attentions,
1049
+ output_hidden_states=output_hidden_states,
1050
+ return_dict=return_dict,
1051
+ )
1052
+
1053
+ sequence_output = outputs[0]
1054
+
1055
+ logits = self.qa_outputs(sequence_output)
1056
+ start_logits, end_logits = logits.split(1, dim=-1)
1057
+ start_logits = start_logits.squeeze(-1).contiguous()
1058
+ end_logits = end_logits.squeeze(-1).contiguous()
1059
+
1060
+ total_loss = None
1061
+ if start_positions is not None and end_positions is not None:
1062
+ # If we are on multi-GPU, splitting adds an extra dimension; remove it
1063
+ if len(start_positions.size()) > 1:
1064
+ start_positions = start_positions.squeeze(-1)
1065
+ if len(end_positions.size()) > 1:
1066
+ end_positions = end_positions.squeeze(-1)
1067
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1068
+ ignored_index = start_logits.size(1)
1069
+ start_positions = start_positions.clamp(0, ignored_index)
1070
+ end_positions = end_positions.clamp(0, ignored_index)
1071
+
1072
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1073
+ start_loss = loss_fct(start_logits, start_positions)
1074
+ end_loss = loss_fct(end_logits, end_positions)
1075
+ total_loss = (start_loss + end_loss) / 2
1076
+
1077
+ if not return_dict:
1078
+ output = (start_logits, end_logits) + outputs[2:]
1079
+ return ((total_loss,) + output) if total_loss is not None else output
1080
+
1081
+ return QuestionAnsweringModelOutput(
1082
+ loss=total_loss,
1083
+ start_logits=start_logits,
1084
+ end_logits=end_logits,
1085
+ hidden_states=outputs.hidden_states,
1086
+ attentions=outputs.attentions,
1087
+ )
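A hedged usage sketch for the span head above; the checkpoint name and the question/context strings are assumptions, and because the QA head is untrained here the decoded span is arbitrary (and may even be empty) until the model is fine-tuned:

```python
import torch
from transformers import AutoTokenizer, SqueezeBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
model = SqueezeBertForQuestionAnswering.from_pretrained("squeezebert/squeezebert-uncased")

question = "What is SqueezeBERT?"
context = "SqueezeBERT is an efficient transformer architecture for mobile devices."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# most likely start/end token positions, decoded back to text
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```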
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert.py ADDED
@@ -0,0 +1,503 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for SqueezeBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
44
+ def whitespace_tokenize(text):
45
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
46
+ text = text.strip()
47
+ if not text:
48
+ return []
49
+ tokens = text.split()
50
+ return tokens
51
+
52
+
53
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->SqueezeBert,BERT->SqueezeBERT
54
+ class SqueezeBertTokenizer(PreTrainedTokenizer):
55
+ r"""
56
+ Construct a SqueezeBERT tokenizer. Based on WordPiece.
57
+
58
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
59
+ this superclass for more information regarding those methods.
60
+
61
+ Args:
62
+ vocab_file (`str`):
63
+ File containing the vocabulary.
64
+ do_lower_case (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to lowercase the input when tokenizing.
66
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
67
+ Whether or not to do basic tokenization before WordPiece.
68
+ never_split (`Iterable`, *optional*):
69
+ Collection of tokens which will never be split during tokenization. Only has an effect when
70
+ `do_basic_tokenize=True`
71
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
72
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
73
+ token instead.
74
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
75
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
76
+ sequence classification or for a text and a question for question answering. It is also used as the last
77
+ token of a sequence built with special tokens.
78
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
79
+ The token used for padding, for example when batching sequences of different lengths.
80
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
81
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
82
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
83
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
84
+ The token used for masking values. This is the token used when training this model with masked language
85
+ modeling. This is the token which the model will try to predict.
86
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
87
+ Whether or not to tokenize Chinese characters.
88
+
89
+ This should likely be deactivated for Japanese (see this
90
+ [issue](https://github.com/huggingface/transformers/issues/328)).
91
+ strip_accents (`bool`, *optional*):
92
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
93
+ value for `lowercase` (as in the original SqueezeBERT).
94
+ """
95
+
96
+ vocab_files_names = VOCAB_FILES_NAMES
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_file,
101
+ do_lower_case=True,
102
+ do_basic_tokenize=True,
103
+ never_split=None,
104
+ unk_token="[UNK]",
105
+ sep_token="[SEP]",
106
+ pad_token="[PAD]",
107
+ cls_token="[CLS]",
108
+ mask_token="[MASK]",
109
+ tokenize_chinese_chars=True,
110
+ strip_accents=None,
111
+ **kwargs,
112
+ ):
113
+ if not os.path.isfile(vocab_file):
114
+ raise ValueError(
115
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
116
+ " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
117
+ )
118
+ self.vocab = load_vocab(vocab_file)
119
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
120
+ self.do_basic_tokenize = do_basic_tokenize
121
+ if do_basic_tokenize:
122
+ self.basic_tokenizer = BasicTokenizer(
123
+ do_lower_case=do_lower_case,
124
+ never_split=never_split,
125
+ tokenize_chinese_chars=tokenize_chinese_chars,
126
+ strip_accents=strip_accents,
127
+ )
128
+
129
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
130
+
131
+ super().__init__(
132
+ do_lower_case=do_lower_case,
133
+ do_basic_tokenize=do_basic_tokenize,
134
+ never_split=never_split,
135
+ unk_token=unk_token,
136
+ sep_token=sep_token,
137
+ pad_token=pad_token,
138
+ cls_token=cls_token,
139
+ mask_token=mask_token,
140
+ tokenize_chinese_chars=tokenize_chinese_chars,
141
+ strip_accents=strip_accents,
142
+ **kwargs,
143
+ )
144
+
145
+ @property
146
+ def do_lower_case(self):
147
+ return self.basic_tokenizer.do_lower_case
148
+
149
+ @property
150
+ def vocab_size(self):
151
+ return len(self.vocab)
152
+
153
+ def get_vocab(self):
154
+ return dict(self.vocab, **self.added_tokens_encoder)
155
+
156
+ def _tokenize(self, text, split_special_tokens=False):
157
+ split_tokens = []
158
+ if self.do_basic_tokenize:
159
+ for token in self.basic_tokenizer.tokenize(
160
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
161
+ ):
162
+ # If the token is part of the never_split set
163
+ if token in self.basic_tokenizer.never_split:
164
+ split_tokens.append(token)
165
+ else:
166
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
167
+ else:
168
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
169
+ return split_tokens
170
+
171
+ def _convert_token_to_id(self, token):
172
+ """Converts a token (str) in an id using the vocab."""
173
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
174
+
175
+ def _convert_id_to_token(self, index):
176
+ """Converts an index (integer) in a token (str) using the vocab."""
177
+ return self.ids_to_tokens.get(index, self.unk_token)
178
+
179
+ def convert_tokens_to_string(self, tokens):
180
+ """Converts a sequence of tokens (string) in a single string."""
181
+ out_string = " ".join(tokens).replace(" ##", "").strip()
182
+ return out_string
183
+
184
+ def build_inputs_with_special_tokens(
185
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
186
+ ) -> List[int]:
187
+ """
188
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
189
+ adding special tokens. A SqueezeBERT sequence has the following format:
190
+
191
+ - single sequence: `[CLS] X [SEP]`
192
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
193
+
194
+ Args:
195
+ token_ids_0 (`List[int]`):
196
+ List of IDs to which the special tokens will be added.
197
+ token_ids_1 (`List[int]`, *optional*):
198
+ Optional second list of IDs for sequence pairs.
199
+
200
+ Returns:
201
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
202
+ """
203
+ if token_ids_1 is None:
204
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
205
+ cls = [self.cls_token_id]
206
+ sep = [self.sep_token_id]
207
+ return cls + token_ids_0 + sep + token_ids_1 + sep
208
+
209
+ def get_special_tokens_mask(
210
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
211
+ ) -> List[int]:
212
+ """
213
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
214
+ special tokens using the tokenizer `prepare_for_model` method.
215
+
216
+ Args:
217
+ token_ids_0 (`List[int]`):
218
+ List of IDs.
219
+ token_ids_1 (`List[int]`, *optional*):
220
+ Optional second list of IDs for sequence pairs.
221
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
222
+ Whether or not the token list is already formatted with special tokens for the model.
223
+
224
+ Returns:
225
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
226
+ """
227
+
228
+ if already_has_special_tokens:
229
+ return super().get_special_tokens_mask(
230
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
231
+ )
232
+
233
+ if token_ids_1 is not None:
234
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
235
+ return [1] + ([0] * len(token_ids_0)) + [1]
236
+
237
+ def create_token_type_ids_from_sequences(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
242
+ pair mask has the following format:
243
+
244
+ ```
245
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
246
+ | first sequence | second sequence |
247
+ ```
248
+
249
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
250
+
251
+ Args:
252
+ token_ids_0 (`List[int]`):
253
+ List of IDs.
254
+ token_ids_1 (`List[int]`, *optional*):
255
+ Optional second list of IDs for sequence pairs.
256
+
257
+ Returns:
258
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
259
+ """
260
+ sep = [self.sep_token_id]
261
+ cls = [self.cls_token_id]
262
+ if token_ids_1 is None:
263
+ return len(cls + token_ids_0 + sep) * [0]
264
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
265
+
266
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
267
+ index = 0
268
+ if os.path.isdir(save_directory):
269
+ vocab_file = os.path.join(
270
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
271
+ )
272
+ else:
273
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
274
+ with open(vocab_file, "w", encoding="utf-8") as writer:
275
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
276
+ if index != token_index:
277
+ logger.warning(
278
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
279
+ " Please check that the vocabulary is not corrupted!"
280
+ )
281
+ index = token_index
282
+ writer.write(token + "\n")
283
+ index += 1
284
+ return (vocab_file,)
285
+
286
+
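A hedged sketch of the special-token and segment-id helpers defined above (`build_inputs_with_special_tokens`, `create_token_type_ids_from_sequences`); the checkpoint name is an assumption:

```python
from transformers import SqueezeBertTokenizer

tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-uncased")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))

# [CLS] A [SEP] B [SEP], with 0/1 segment ids for the two sequences
input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
print(tokenizer.convert_ids_to_tokens(input_ids))
print(token_type_ids)
```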
287
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
288
+ class BasicTokenizer(object):
289
+ """
290
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
291
+
292
+ Args:
293
+ do_lower_case (`bool`, *optional*, defaults to `True`):
294
+ Whether or not to lowercase the input when tokenizing.
295
+ never_split (`Iterable`, *optional*):
296
+ Collection of tokens which will never be split during tokenization. Only has an effect when
297
+ `do_basic_tokenize=True`
298
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
299
+ Whether or not to tokenize Chinese characters.
300
+
301
+ This should likely be deactivated for Japanese (see this
302
+ [issue](https://github.com/huggingface/transformers/issues/328)).
303
+ strip_accents (`bool`, *optional*):
304
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
305
+ value for `lowercase` (as in the original BERT).
306
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
307
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
308
+ the full context of the words, such as contractions.
309
+ """
310
+
311
+ def __init__(
312
+ self,
313
+ do_lower_case=True,
314
+ never_split=None,
315
+ tokenize_chinese_chars=True,
316
+ strip_accents=None,
317
+ do_split_on_punc=True,
318
+ ):
319
+ if never_split is None:
320
+ never_split = []
321
+ self.do_lower_case = do_lower_case
322
+ self.never_split = set(never_split)
323
+ self.tokenize_chinese_chars = tokenize_chinese_chars
324
+ self.strip_accents = strip_accents
325
+ self.do_split_on_punc = do_split_on_punc
326
+
327
+ def tokenize(self, text, never_split=None):
328
+ """
329
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
330
+
331
+ Args:
332
+ never_split (`List[str]`, *optional*)
333
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
334
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
335
+ """
336
+ # union() returns a new set by concatenating the two sets.
337
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
338
+ text = self._clean_text(text)
339
+
340
+ # This was added on November 1st, 2018 for the multilingual and Chinese
341
+ # models. This is also applied to the English models now, but it doesn't
342
+ # matter since the English models were not trained on any Chinese data
343
+ # and generally don't have any Chinese data in them (there are Chinese
344
+ # characters in the vocabulary because Wikipedia does have some Chinese
345
+ # words in the English Wikipedia.).
346
+ if self.tokenize_chinese_chars:
347
+ text = self._tokenize_chinese_chars(text)
348
+ # prevents treating the same character with different unicode codepoints as different characters
349
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
350
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
351
+ split_tokens = []
352
+ for token in orig_tokens:
353
+ if token not in never_split:
354
+ if self.do_lower_case:
355
+ token = token.lower()
356
+ if self.strip_accents is not False:
357
+ token = self._run_strip_accents(token)
358
+ elif self.strip_accents:
359
+ token = self._run_strip_accents(token)
360
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
361
+
362
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
363
+ return output_tokens
364
+
365
+ def _run_strip_accents(self, text):
366
+ """Strips accents from a piece of text."""
367
+ text = unicodedata.normalize("NFD", text)
368
+ output = []
369
+ for char in text:
370
+ cat = unicodedata.category(char)
371
+ if cat == "Mn":
372
+ continue
373
+ output.append(char)
374
+ return "".join(output)
375
+
376
+ def _run_split_on_punc(self, text, never_split=None):
377
+ """Splits punctuation on a piece of text."""
378
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
379
+ return [text]
380
+ chars = list(text)
381
+ i = 0
382
+ start_new_word = True
383
+ output = []
384
+ while i < len(chars):
385
+ char = chars[i]
386
+ if _is_punctuation(char):
387
+ output.append([char])
388
+ start_new_word = True
389
+ else:
390
+ if start_new_word:
391
+ output.append([])
392
+ start_new_word = False
393
+ output[-1].append(char)
394
+ i += 1
395
+
396
+ return ["".join(x) for x in output]
397
+
398
+ def _tokenize_chinese_chars(self, text):
399
+ """Adds whitespace around any CJK character."""
400
+ output = []
401
+ for char in text:
402
+ cp = ord(char)
403
+ if self._is_chinese_char(cp):
404
+ output.append(" ")
405
+ output.append(char)
406
+ output.append(" ")
407
+ else:
408
+ output.append(char)
409
+ return "".join(output)
410
+
411
+ def _is_chinese_char(self, cp):
412
+ """Checks whether CP is the codepoint of a CJK character."""
413
+ # This defines a "chinese character" as anything in the CJK Unicode block:
414
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
415
+ #
416
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
417
+ # despite its name. The modern Korean Hangul alphabet is a different block,
418
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
419
+ # space-separated words, so they are not treated specially and handled
420
+ # like all of the other languages.
421
+ if (
422
+ (cp >= 0x4E00 and cp <= 0x9FFF)
423
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
424
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
425
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
426
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
427
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
428
+ or (cp >= 0xF900 and cp <= 0xFAFF)
429
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
430
+ ): #
431
+ return True
432
+
433
+ return False
434
+
435
+ def _clean_text(self, text):
436
+ """Performs invalid character removal and whitespace cleanup on text."""
437
+ output = []
438
+ for char in text:
439
+ cp = ord(char)
440
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
441
+ continue
442
+ if _is_whitespace(char):
443
+ output.append(" ")
444
+ else:
445
+ output.append(char)
446
+ return "".join(output)
447
+
448
+
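A small sketch of `BasicTokenizer` in isolation: cleaning, lowercasing, accent stripping, and punctuation splitting, before any WordPiece is applied. The input string is only an illustration:

```python
from transformers.models.squeezebert.tokenization_squeezebert import BasicTokenizer

basic = BasicTokenizer(do_lower_case=True)
print(basic.tokenize("Héllo, World!  (SqueezeBERT)"))
# -> ['hello', ',', 'world', '!', '(', 'squeezebert', ')']
```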
449
+ class WordpieceTokenizer(object):
450
+ """Runs WordPiece tokenization."""
451
+
452
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
453
+ self.vocab = vocab
454
+ self.unk_token = unk_token
455
+ self.max_input_chars_per_word = max_input_chars_per_word
456
+
457
+ def tokenize(self, text):
458
+ """
459
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
460
+ tokenization using the given vocabulary.
461
+
462
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
463
+
464
+ Args:
465
+ text: A single token or whitespace separated tokens. This should have
466
+ already been passed through *BasicTokenizer*.
467
+
468
+ Returns:
469
+ A list of wordpiece tokens.
470
+ """
471
+
472
+ output_tokens = []
473
+ for token in whitespace_tokenize(text):
474
+ chars = list(token)
475
+ if len(chars) > self.max_input_chars_per_word:
476
+ output_tokens.append(self.unk_token)
477
+ continue
478
+
479
+ is_bad = False
480
+ start = 0
481
+ sub_tokens = []
482
+ while start < len(chars):
483
+ end = len(chars)
484
+ cur_substr = None
485
+ while start < end:
486
+ substr = "".join(chars[start:end])
487
+ if start > 0:
488
+ substr = "##" + substr
489
+ if substr in self.vocab:
490
+ cur_substr = substr
491
+ break
492
+ end -= 1
493
+ if cur_substr is None:
494
+ is_bad = True
495
+ break
496
+ sub_tokens.append(cur_substr)
497
+ start = end
498
+
499
+ if is_bad:
500
+ output_tokens.append(self.unk_token)
501
+ else:
502
+ output_tokens.extend(sub_tokens)
503
+ return output_tokens
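A small sketch of the greedy longest-match-first behaviour described above, with a toy vocabulary (not the real SqueezeBERT vocab):

```python
from transformers.models.squeezebert.tokenization_squeezebert import WordpieceTokenizer

vocab = {"un": 0, "##aff": 1, "##able": 2, "runn": 3, "##ing": 4, "[UNK]": 5}
wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wp.tokenize("running"))    # ['runn', '##ing']
print(wp.tokenize("xyz"))        # ['[UNK]'] - no prefix of "xyz" is in the vocab
```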
llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/tokenization_squeezebert_fast.py ADDED
@@ -0,0 +1,173 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for SqueezeBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_squeezebert import SqueezeBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->SqueezeBert,BERT->SqueezeBERT
33
+ class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
34
+ r"""
35
+ Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
36
+
37
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
38
+ refer to this superclass for more information regarding those methods.
39
+
40
+ Args:
41
+ vocab_file (`str`):
42
+ File containing the vocabulary.
43
+ do_lower_case (`bool`, *optional*, defaults to `True`):
44
+ Whether or not to lowercase the input when tokenizing.
45
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
46
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
47
+ token instead.
48
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
49
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
50
+ sequence classification or for a text and a question for question answering. It is also used as the last
51
+ token of a sequence built with special tokens.
52
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
53
+ The token used for padding, for example when batching sequences of different lengths.
54
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
55
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
56
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
57
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
58
+ The token used for masking values. This is the token used when training this model with masked language
59
+ modeling. This is the token which the model will try to predict.
60
+ clean_text (`bool`, *optional*, defaults to `True`):
61
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
62
+ whitespaces by the classic one.
63
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
65
+ issue](https://github.com/huggingface/transformers/issues/328)).
66
+ strip_accents (`bool`, *optional*):
67
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
68
+ value for `lowercase` (as in the original SqueezeBERT).
69
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
70
+ The prefix for subwords.
71
+ """
72
+
73
+ vocab_files_names = VOCAB_FILES_NAMES
74
+ slow_tokenizer_class = SqueezeBertTokenizer
75
+
76
+ def __init__(
77
+ self,
78
+ vocab_file=None,
79
+ tokenizer_file=None,
80
+ do_lower_case=True,
81
+ unk_token="[UNK]",
82
+ sep_token="[SEP]",
83
+ pad_token="[PAD]",
84
+ cls_token="[CLS]",
85
+ mask_token="[MASK]",
86
+ tokenize_chinese_chars=True,
87
+ strip_accents=None,
88
+ **kwargs,
89
+ ):
90
+ super().__init__(
91
+ vocab_file,
92
+ tokenizer_file=tokenizer_file,
93
+ do_lower_case=do_lower_case,
94
+ unk_token=unk_token,
95
+ sep_token=sep_token,
96
+ pad_token=pad_token,
97
+ cls_token=cls_token,
98
+ mask_token=mask_token,
99
+ tokenize_chinese_chars=tokenize_chinese_chars,
100
+ strip_accents=strip_accents,
101
+ **kwargs,
102
+ )
103
+
104
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
105
+ if (
106
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
107
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
108
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
109
+ ):
110
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
111
+ normalizer_state["lowercase"] = do_lower_case
112
+ normalizer_state["strip_accents"] = strip_accents
113
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
114
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
115
+
116
+ self.do_lower_case = do_lower_case
117
+
118
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
119
+ """
120
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
121
+ adding special tokens. A SqueezeBERT sequence has the following format:
122
+
123
+ - single sequence: `[CLS] X [SEP]`
124
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
125
+
126
+ Args:
127
+ token_ids_0 (`List[int]`):
128
+ List of IDs to which the special tokens will be added.
129
+ token_ids_1 (`List[int]`, *optional*):
130
+ Optional second list of IDs for sequence pairs.
131
+
132
+ Returns:
133
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
134
+ """
135
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
136
+
137
+ if token_ids_1 is not None:
138
+ output += token_ids_1 + [self.sep_token_id]
139
+
140
+ return output
141
+
142
+ def create_token_type_ids_from_sequences(
143
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
144
+ ) -> List[int]:
145
+ """
146
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT sequence
147
+ pair mask has the following format:
148
+
149
+ ```
150
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
151
+ | first sequence | second sequence |
152
+ ```
153
+
154
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
155
+
156
+ Args:
157
+ token_ids_0 (`List[int]`):
158
+ List of IDs.
159
+ token_ids_1 (`List[int]`, *optional*):
160
+ Optional second list of IDs for sequence pairs.
161
+
162
+ Returns:
163
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
164
+ """
165
+ sep = [self.sep_token_id]
166
+ cls = [self.cls_token_id]
167
+ if token_ids_1 is None:
168
+ return len(cls + token_ids_0 + sep) * [0]
169
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
170
+
171
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
172
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
173
+ return tuple(files)
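A hedged usage sketch of the fast tokenizer; the checkpoint name and example sentences are assumptions:

```python
from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")

enc = tokenizer("What is SqueezeBERT?", "An efficient BERT variant.", return_tensors="pt")
print(enc["input_ids"].shape)
print(enc["token_type_ids"][0].tolist())  # 0s for the first segment, 1s for the second
```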