applied-ai-018 committed
Commit 7dcc96a · verified · 1 parent: f2671d9

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__init__.py +71 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/configuration_bigbird_pegasus.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/convert_bigbird_pegasus_tf_to_pytorch.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +422 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py +170 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py +82 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py +117 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py +1061 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py +429 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__init__.py +68 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/__init__.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/convert_custom_code_checkpoint.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/modeling_falcon.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/configuration_falcon.py +192 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/convert_custom_code_checkpoint.py +74 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/modeling_falcon.py +1648 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py +67 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py +200 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py +1174 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py +804 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__init__.py +73 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/configuration_levit.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/convert_levit_timm_to_pytorch.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/feature_extraction_levit.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/image_processing_levit.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/modeling_levit.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py +146 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py +181 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py +33 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py +325 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py +739 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py +110 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py +185 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +312 -0
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__init__.py ADDED
@@ -0,0 +1,71 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_bigbird_pegasus": [
21
+ "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "BigBirdPegasusConfig",
23
+ "BigBirdPegasusOnnxConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_bigbird_pegasus"] = [
34
+ "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "BigBirdPegasusForCausalLM",
36
+ "BigBirdPegasusForConditionalGeneration",
37
+ "BigBirdPegasusForQuestionAnswering",
38
+ "BigBirdPegasusForSequenceClassification",
39
+ "BigBirdPegasusModel",
40
+ "BigBirdPegasusPreTrainedModel",
41
+ ]
42
+
43
+
44
+ if TYPE_CHECKING:
45
+ from .configuration_bigbird_pegasus import (
46
+ BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
47
+ BigBirdPegasusConfig,
48
+ BigBirdPegasusOnnxConfig,
49
+ )
50
+
51
+ try:
52
+ if not is_torch_available():
53
+ raise OptionalDependencyNotAvailable()
54
+ except OptionalDependencyNotAvailable:
55
+ pass
56
+ else:
57
+ from .modeling_bigbird_pegasus import (
58
+ BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
59
+ BigBirdPegasusForCausalLM,
60
+ BigBirdPegasusForConditionalGeneration,
61
+ BigBirdPegasusForQuestionAnswering,
62
+ BigBirdPegasusForSequenceClassification,
63
+ BigBirdPegasusModel,
64
+ BigBirdPegasusPreTrainedModel,
65
+ )
66
+
67
+
68
+ else:
69
+ import sys
70
+
71
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
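A note on the file above: it registers the package with `_LazyModule`, so submodules are imported only on first attribute access, and the PyTorch-only modeling classes are added to `_import_structure` only when `is_torch_available()` returns true. A minimal usage sketch, assuming the standard top-level `transformers` re-exports (the sketch itself is not part of the commit):

```python
# Minimal sketch: the config can be imported and used without touching the
# torch-dependent modeling module registered above.
from transformers.models.bigbird_pegasus import BigBirdPegasusConfig

config = BigBirdPegasusConfig()      # defaults mirror bigbird-pegasus-large-arxiv
print(config.model_type)             # -> "bigbird_pegasus"
print(config.attention_type)         # -> "block_sparse" (encoder attention default)
```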
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/configuration_bigbird_pegasus.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/convert_bigbird_pegasus_tf_to_pytorch.cpython-310.pyc ADDED
Binary file (5.33 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc ADDED
Binary file (85.9 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py ADDED
@@ -0,0 +1,422 @@
1
+ # coding=utf-8
2
+ # Copyright Google Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ BigBirdPegasus model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Any, Mapping, Optional
19
+
20
+ from ... import PreTrainedTokenizer
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
23
+ from ...onnx.utils import compute_effective_axis_dimension
24
+ from ...utils import TensorType, is_torch_available, logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
30
+ "google/bigbird-pegasus-large-arxiv": (
31
+ "https://huggingface.co/google/bigbird-pegasus-large-arxiv/resolve/main/config.json"
32
+ ),
33
+ "google/bigbird-pegasus-large-pubmed": (
34
+ "https://huggingface.co/google/bigbird-pegasus-large-pubmed/resolve/main/config.json"
35
+ ),
36
+ "google/bigbird-pegasus-large-bigpatent": (
37
+ "https://huggingface.co/google/bigbird-pegasus-large-bigpatent/resolve/main/config.json"
38
+ ),
39
+ # See all BigBirdPegasus models at https://huggingface.co/models?filter=bigbird_pegasus
40
+ }
41
+
42
+
43
+ class BigBirdPegasusConfig(PretrainedConfig):
44
+ r"""
45
+ This is the configuration class to store the configuration of a [`BigBirdPegasusModel`]. It is used to instantiate
46
+ a BigBirdPegasus model according to the specified arguments, defining the model architecture. Instantiating a
47
+ configuration with the defaults will yield a similar configuration to that of the BigBirdPegasus
48
+ [google/bigbird-pegasus-large-arxiv](https://huggingface.co/google/bigbird-pegasus-large-arxiv) architecture.
49
+
50
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
51
+ documentation from [`PretrainedConfig`] for more information.
52
+
53
+
54
+ Args:
55
+ vocab_size (`int`, *optional*, defaults to 96103):
56
+ Vocabulary size of the BigBirdPegasus model. Defines the number of different tokens that can be represented
57
+ by the `inputs_ids` passed when calling [`BigBirdPegasusModel`].
58
+ d_model (`int`, *optional*, defaults to 1024):
59
+ Dimension of the layers and the pooler layer.
60
+ encoder_layers (`int`, *optional*, defaults to 16):
61
+ Number of encoder layers.
62
+ decoder_layers (`int`, *optional*, defaults to 16):
63
+ Number of decoder layers.
64
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
65
+ Number of attention heads for each attention layer in the Transformer encoder.
66
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
67
+ Number of attention heads for each attention layer in the Transformer decoder.
68
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
69
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
70
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
71
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
72
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu_new"`):
73
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
74
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
75
+ dropout (`float`, *optional*, defaults to 0.1):
76
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
77
+ attention_dropout (`float`, *optional*, defaults to 0.0):
78
+ The dropout ratio for the attention probabilities.
79
+ activation_dropout (`float`, *optional*, defaults to 0.0):
80
+ The dropout ratio for activations inside the fully connected layer.
81
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
82
+ The dropout ratio for classifier.
83
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
84
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
85
+ just in case (e.g., 1024 or 2048 or 4096).
86
+ init_std (`float`, *optional*, defaults to 0.02):
87
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
88
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
89
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
90
+ for more details.
91
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
92
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
93
+ for more details.
94
+ use_cache (`bool`, *optional*, defaults to `True`):
95
+ Whether or not the model should return the last key/values attentions (not used by all models).
96
+ attention_type (`str`, *optional*, defaults to `"block_sparse"`):
97
+ Whether to use block sparse attention (with n complexity) as introduced in paper or original attention
98
+ layer (with n^2 complexity) in encoder. Possible values are `"original_full"` and `"block_sparse"`.
99
+ use_bias (`bool`, *optional*, defaults to `False`):
100
+ Whether to use bias in query, key, value.
101
+ block_size (`int`, *optional*, defaults to 64):
102
+ Size of each block. Useful only when `attention_type == "block_sparse"`.
103
+ num_random_blocks (`int`, *optional*, defaults to 3):
104
+ Each query is going to attend these many number of random blocks. Useful only when `attention_type ==
105
+ "block_sparse"`.
106
+ scale_embedding (`bool`, *optional*, defaults to `True`):
107
+ Whether to rescale embeddings with (hidden_size ** 0.5).
108
+
109
+ Example:
110
+
111
+ ```python
112
+ >>> from transformers import BigBirdPegasusConfig, BigBirdPegasusModel
113
+
114
+ >>> # Initializing a BigBirdPegasus bigbird-pegasus-base style configuration
115
+ >>> configuration = BigBirdPegasusConfig()
116
+
117
+ >>> # Initializing a model (with random weights) from the bigbird-pegasus-base style configuration
118
+ >>> model = BigBirdPegasusModel(configuration)
119
+
120
+ >>> # Accessing the model configuration
121
+ >>> configuration = model.config
122
+ ```"""
123
+
124
+ model_type = "bigbird_pegasus"
125
+ keys_to_ignore_at_inference = ["past_key_values"]
126
+ attribute_map = {
127
+ "num_attention_heads": "encoder_attention_heads",
128
+ "hidden_size": "d_model",
129
+ "attention_probs_dropout_prob": "attention_dropout",
130
+ }
131
+
132
+ def __init__(
133
+ self,
134
+ vocab_size=96103,
135
+ max_position_embeddings=4096,
136
+ encoder_layers=16,
137
+ encoder_ffn_dim=4096,
138
+ encoder_attention_heads=16,
139
+ decoder_layers=16,
140
+ decoder_ffn_dim=4096,
141
+ decoder_attention_heads=16,
142
+ encoder_layerdrop=0.0,
143
+ decoder_layerdrop=0.0,
144
+ use_cache=True,
145
+ is_encoder_decoder=True,
146
+ activation_function="gelu_new",
147
+ d_model=1024,
148
+ dropout=0.1,
149
+ attention_dropout=0.0,
150
+ activation_dropout=0.0,
151
+ init_std=0.02,
152
+ decoder_start_token_id=2,
153
+ classifier_dropout=0.0,
154
+ scale_embedding=True,
155
+ pad_token_id=0,
156
+ bos_token_id=2,
157
+ eos_token_id=1,
158
+ attention_type="block_sparse", # only for encoder
159
+ block_size=64,
160
+ num_random_blocks=3,
161
+ use_bias=False,
162
+ **kwargs,
163
+ ):
164
+ self.vocab_size = vocab_size
165
+ self.max_position_embeddings = max_position_embeddings
166
+ self.d_model = d_model
167
+ self.encoder_ffn_dim = encoder_ffn_dim
168
+ self.encoder_layers = encoder_layers
169
+ self.encoder_attention_heads = encoder_attention_heads
170
+ self.decoder_ffn_dim = decoder_ffn_dim
171
+ self.decoder_layers = decoder_layers
172
+ self.decoder_attention_heads = decoder_attention_heads
173
+ self.dropout = dropout
174
+ self.attention_dropout = attention_dropout
175
+ self.activation_dropout = activation_dropout
176
+ self.activation_function = activation_function
177
+ self.init_std = init_std
178
+ self.encoder_layerdrop = encoder_layerdrop
179
+ self.decoder_layerdrop = decoder_layerdrop
180
+ self.classifier_dropout = classifier_dropout
181
+ self.use_cache = use_cache
182
+ self.num_hidden_layers = encoder_layers
183
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
184
+
185
+ # extra config
186
+ self.attention_type = attention_type
187
+ self.block_size = block_size
188
+ self.num_random_blocks = num_random_blocks
189
+ self.use_bias = use_bias
190
+
191
+ super().__init__(
192
+ pad_token_id=pad_token_id,
193
+ bos_token_id=bos_token_id,
194
+ eos_token_id=eos_token_id,
195
+ is_encoder_decoder=is_encoder_decoder,
196
+ decoder_start_token_id=decoder_start_token_id,
197
+ **kwargs,
198
+ )
199
+
200
+
201
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
202
+ class BigBirdPegasusOnnxConfig(OnnxSeq2SeqConfigWithPast):
203
+ @property
204
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
205
+ if self.task in ["default", "seq2seq-lm"]:
206
+ common_inputs = OrderedDict(
207
+ [
208
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
209
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
210
+ ]
211
+ )
212
+
213
+ if self.use_past:
214
+ common_inputs["decoder_input_ids"] = {0: "batch"}
215
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
216
+ else:
217
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
218
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
219
+
220
+ if self.use_past:
221
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
222
+ elif self.task == "causal-lm":
223
+ # TODO: figure this case out.
224
+ common_inputs = OrderedDict(
225
+ [
226
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
227
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
228
+ ]
229
+ )
230
+ if self.use_past:
231
+ num_encoder_layers, _ = self.num_layers
232
+ for i in range(num_encoder_layers):
233
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
234
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
235
+ else:
236
+ common_inputs = OrderedDict(
237
+ [
238
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
239
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
240
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
241
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
242
+ ]
243
+ )
244
+
245
+ return common_inputs
246
+
247
+ @property
248
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
249
+ if self.task in ["default", "seq2seq-lm"]:
250
+ common_outputs = super().outputs
251
+ else:
252
+ common_outputs = super(OnnxConfigWithPast, self).outputs
253
+ if self.use_past:
254
+ num_encoder_layers, _ = self.num_layers
255
+ for i in range(num_encoder_layers):
256
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
257
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
258
+ return common_outputs
259
+
260
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
261
+ self,
262
+ tokenizer: PreTrainedTokenizer,
263
+ batch_size: int = -1,
264
+ seq_length: int = -1,
265
+ is_pair: bool = False,
266
+ framework: Optional[TensorType] = None,
267
+ ) -> Mapping[str, Any]:
268
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
269
+ tokenizer, batch_size, seq_length, is_pair, framework
270
+ )
271
+
272
+ # Generate decoder inputs
273
+ decoder_seq_length = seq_length if not self.use_past else 1
274
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
275
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
276
+ )
277
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
278
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
279
+
280
+ if self.use_past:
281
+ if not is_torch_available():
282
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
283
+ else:
284
+ import torch
285
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
286
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
287
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
288
+ encoder_shape = (
289
+ batch,
290
+ num_encoder_attention_heads,
291
+ encoder_seq_length,
292
+ self._config.hidden_size // num_encoder_attention_heads,
293
+ )
294
+ decoder_past_length = decoder_seq_length + 3
295
+ decoder_shape = (
296
+ batch,
297
+ num_decoder_attention_heads,
298
+ decoder_past_length,
299
+ self._config.hidden_size // num_decoder_attention_heads,
300
+ )
301
+
302
+ common_inputs["decoder_attention_mask"] = torch.cat(
303
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
304
+ )
305
+
306
+ common_inputs["past_key_values"] = []
307
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
308
+ num_encoder_layers, num_decoder_layers = self.num_layers
309
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
310
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
311
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
312
+
313
+ for _ in range(min_num_layers):
314
+ common_inputs["past_key_values"].append(
315
+ (
316
+ torch.zeros(decoder_shape),
317
+ torch.zeros(decoder_shape),
318
+ torch.zeros(encoder_shape),
319
+ torch.zeros(encoder_shape),
320
+ )
321
+ )
322
+ # TODO: test this.
323
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
324
+ for _ in range(min_num_layers, max_num_layers):
325
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
326
+ return common_inputs
327
+
328
+ def _generate_dummy_inputs_for_causal_lm(
329
+ self,
330
+ tokenizer: PreTrainedTokenizer,
331
+ batch_size: int = -1,
332
+ seq_length: int = -1,
333
+ is_pair: bool = False,
334
+ framework: Optional[TensorType] = None,
335
+ ) -> Mapping[str, Any]:
336
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
337
+ tokenizer, batch_size, seq_length, is_pair, framework
338
+ )
339
+
340
+ if self.use_past:
341
+ if not is_torch_available():
342
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
343
+ else:
344
+ import torch
345
+ batch, seqlen = common_inputs["input_ids"].shape
346
+ # Not using the same length for past_key_values
347
+ past_key_values_length = seqlen + 2
348
+ num_encoder_layers, _ = self.num_layers
349
+ num_encoder_attention_heads, _ = self.num_attention_heads
350
+ past_shape = (
351
+ batch,
352
+ num_encoder_attention_heads,
353
+ past_key_values_length,
354
+ self._config.hidden_size // num_encoder_attention_heads,
355
+ )
356
+
357
+ mask_dtype = common_inputs["attention_mask"].dtype
358
+ common_inputs["attention_mask"] = torch.cat(
359
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
360
+ )
361
+ common_inputs["past_key_values"] = [
362
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
363
+ ]
364
+ return common_inputs
365
+
366
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
367
+ self,
368
+ tokenizer: PreTrainedTokenizer,
369
+ batch_size: int = -1,
370
+ seq_length: int = -1,
371
+ is_pair: bool = False,
372
+ framework: Optional[TensorType] = None,
373
+ ) -> Mapping[str, Any]:
374
+ # Copied from OnnxConfig.generate_dummy_inputs
375
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
376
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
377
+ batch_size = compute_effective_axis_dimension(
378
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
379
+ )
380
+
381
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
382
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
383
+ seq_length = compute_effective_axis_dimension(
384
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
385
+ )
386
+
387
+ # Generate dummy inputs according to compute batch and sequence
388
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
389
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
390
+ return common_inputs
391
+
392
+ def generate_dummy_inputs(
393
+ self,
394
+ tokenizer: PreTrainedTokenizer,
395
+ batch_size: int = -1,
396
+ seq_length: int = -1,
397
+ is_pair: bool = False,
398
+ framework: Optional[TensorType] = None,
399
+ ) -> Mapping[str, Any]:
400
+ if self.task in ["default", "seq2seq-lm"]:
401
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
402
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
403
+ )
404
+
405
+ elif self.task == "causal-lm":
406
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
407
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
408
+ )
409
+ else:
410
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
411
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
412
+ )
413
+
414
+ return common_inputs
415
+
416
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
417
+ if self.task in ["default", "seq2seq-lm"]:
418
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
419
+ else:
420
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
421
+ flattened_output, name, idx, t
422
+ )
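As a hedged illustration of the configuration file above (not part of the commit; the concrete values are arbitrary examples): the encoder's sparse-attention behaviour is controlled by `attention_type`, `block_size` and `num_random_blocks`, all defined in `BigBirdPegasusConfig.__init__`.

```python
# Sketch: configure block-sparse encoder attention using the parameters
# defined above. Values are illustrative, not recommendations from the commit.
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration

config = BigBirdPegasusConfig(
    attention_type="block_sparse",   # "original_full" switches to quadratic attention
    block_size=64,                   # tokens per block in the sparse pattern
    num_random_blocks=3,             # random blocks each query block attends to
    max_position_embeddings=4096,
)
model = BigBirdPegasusForConditionalGeneration(config)  # random weights; needs PyTorch
```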
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py ADDED
@@ -0,0 +1,170 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ from typing import Dict
18
+
19
+ import tensorflow as tf
20
+ import torch
21
+ from tqdm import tqdm
22
+
23
+ from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
24
+
25
+
26
+ INIT_COMMON = [
27
+ # tf -> hf
28
+ ("/", "."),
29
+ ("layer_", "layers."),
30
+ ("kernel", "weight"),
31
+ ("beta", "bias"),
32
+ ("gamma", "weight"),
33
+ ("pegasus", "model"),
34
+ ]
35
+ END_COMMON = [
36
+ (".output.dense", ".fc2"),
37
+ ("intermediate.LayerNorm", "final_layer_norm"),
38
+ ("intermediate.dense", "fc1"),
39
+ ]
40
+
41
+ DECODER_PATTERNS = (
42
+ INIT_COMMON
43
+ + [
44
+ ("attention.self.LayerNorm", "self_attn_layer_norm"),
45
+ ("attention.output.dense", "self_attn.out_proj"),
46
+ ("attention.self", "self_attn"),
47
+ ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
48
+ ("attention.encdec_output.dense", "encoder_attn.out_proj"),
49
+ ("attention.encdec", "encoder_attn"),
50
+ ("key", "k_proj"),
51
+ ("value", "v_proj"),
52
+ ("query", "q_proj"),
53
+ ("decoder.LayerNorm", "decoder.layernorm_embedding"),
54
+ ]
55
+ + END_COMMON
56
+ )
57
+
58
+ REMAINING_PATTERNS = (
59
+ INIT_COMMON
60
+ + [
61
+ ("embeddings.word_embeddings", "shared.weight"),
62
+ ("embeddings.position_embeddings", "embed_positions.weight"),
63
+ ("attention.self.LayerNorm", "self_attn_layer_norm"),
64
+ ("attention.output.dense", "self_attn.output"),
65
+ ("attention.self", "self_attn.self"),
66
+ ("encoder.LayerNorm", "encoder.layernorm_embedding"),
67
+ ]
68
+ + END_COMMON
69
+ )
70
+
71
+ KEYS_TO_IGNORE = [
72
+ "encdec/key/bias",
73
+ "encdec/query/bias",
74
+ "encdec/value/bias",
75
+ "self/key/bias",
76
+ "self/query/bias",
77
+ "self/value/bias",
78
+ "encdec_output/dense/bias",
79
+ "attention/output/dense/bias",
80
+ ]
81
+
82
+
83
+ def rename_state_dict_key(k, patterns):
84
+ for tf_name, hf_name in patterns:
85
+ k = k.replace(tf_name, hf_name)
86
+ return k
87
+
88
+
89
+ def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
90
+ cfg = BigBirdPegasusConfig(**config_update)
91
+ torch_model = BigBirdPegasusForConditionalGeneration(cfg)
92
+ state_dict = torch_model.state_dict()
93
+ mapping = {}
94
+
95
+ # separating decoder weights
96
+ decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
97
+ remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
98
+
99
+ for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
100
+ conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
101
+ if any(conditions):
102
+ continue
103
+ patterns = DECODER_PATTERNS
104
+ new_k = rename_state_dict_key(k, patterns)
105
+ if new_k not in state_dict:
106
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
107
+ if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
108
+ v = v.T
109
+ mapping[new_k] = torch.from_numpy(v)
110
+ assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
111
+
112
+ for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
113
+ conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
114
+ if any(conditions):
115
+ continue
116
+ patterns = REMAINING_PATTERNS
117
+ new_k = rename_state_dict_key(k, patterns)
118
+ if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
119
+ raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
120
+ if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
121
+ v = v.T
122
+ mapping[new_k] = torch.from_numpy(v)
123
+ if k != "pegasus/embeddings/position_embeddings":
124
+ assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
125
+
126
+ mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
127
+ mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
128
+ missing, extra = torch_model.load_state_dict(mapping, strict=False)
129
+ unexpected_missing = [
130
+ k
131
+ for k in missing
132
+ if k
133
+ not in [
134
+ "final_logits_bias",
135
+ "model.encoder.embed_tokens.weight",
136
+ "model.decoder.embed_tokens.weight",
137
+ "lm_head.weight",
138
+ ]
139
+ ]
140
+ assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
141
+ assert extra == [], f"no matches found for the following tf keys {extra}"
142
+ return torch_model
143
+
144
+
145
+ def get_tf_weights_as_numpy(path) -> Dict:
146
+ init_vars = tf.train.list_variables(path)
147
+ tf_weights = {}
148
+ ignore_name = ["global_step"]
149
+ for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
150
+ skip_key = any(pat in name for pat in ignore_name)
151
+ if skip_key:
152
+ continue
153
+ array = tf.train.load_variable(path, name)
154
+ tf_weights[name] = array
155
+ return tf_weights
156
+
157
+
158
+ def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
159
+ tf_weights = get_tf_weights_as_numpy(ckpt_path)
160
+ torch_model = convert_bigbird_pegasus(tf_weights, config_update)
161
+ torch_model.save_pretrained(save_dir)
162
+
163
+
164
+ if __name__ == "__main__":
165
+ parser = argparse.ArgumentParser()
166
+ parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
167
+ parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
168
+ args = parser.parse_args()
169
+ config_update = {}
170
+ convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
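A hedged usage sketch for the conversion script above; the checkpoint and output paths are placeholders, and running it requires both TensorFlow and PyTorch:

```python
# Sketch only, not shipped with the commit. Paths are placeholders.
from transformers.models.bigbird_pegasus.convert_bigbird_pegasus_tf_to_pytorch import (
    convert_bigbird_pegasus_ckpt_to_pytorch,
)

convert_bigbird_pegasus_ckpt_to_pytorch(
    ckpt_path="path/to/tf_checkpoint",   # forwarded to tf.train.list_variables
    save_dir="path/to/pytorch_dump",     # written via save_pretrained()
    config_update={},                    # optional BigBirdPegasusConfig overrides
)
# Equivalent CLI form, per the argparse block above:
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path path/to/tf_checkpoint --save_dir path/to/pytorch_dump
```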
env-llmeval/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright 2023 The HuggingFace and Baidu Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ # rely on isort to merge the imports
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_sentencepiece_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["tokenization_ernie_m"] = ["ErnieMTokenizer"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_ernie_m"] = [
39
+ "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "ErnieMForMultipleChoice",
41
+ "ErnieMForQuestionAnswering",
42
+ "ErnieMForSequenceClassification",
43
+ "ErnieMForTokenClassification",
44
+ "ErnieMModel",
45
+ "ErnieMPreTrainedModel",
46
+ "ErnieMForInformationExtraction",
47
+ ]
48
+
49
+
50
+ if TYPE_CHECKING:
51
+ from .configuration_ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig
52
+
53
+ try:
54
+ if not is_sentencepiece_available():
55
+ raise OptionalDependencyNotAvailable()
56
+ except OptionalDependencyNotAvailable:
57
+ pass
58
+ else:
59
+ from .tokenization_ernie_m import ErnieMTokenizer
60
+
61
+ try:
62
+ if not is_torch_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ from .modeling_ernie_m import (
68
+ ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST,
69
+ ErnieMForInformationExtraction,
70
+ ErnieMForMultipleChoice,
71
+ ErnieMForQuestionAnswering,
72
+ ErnieMForSequenceClassification,
73
+ ErnieMForTokenClassification,
74
+ ErnieMModel,
75
+ ErnieMPreTrainedModel,
76
+ )
77
+
78
+
79
+ else:
80
+ import sys
81
+
82
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
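Like the BigBird-Pegasus package, this `__init__.py` gates its exports on optional dependencies, but with two separate guards: the tokenizer needs sentencepiece and the modeling classes need PyTorch. A small sketch of how that surfaces to callers, assuming the usual top-level `transformers` re-exports (not part of the commit):

```python
# Sketch of the optional-dependency gating declared above.
from transformers import ErnieMConfig                 # always registered
from transformers.utils import is_sentencepiece_available, is_torch_available

if is_sentencepiece_available():
    from transformers import ErnieMTokenizer          # gated on sentencepiece above
if is_torch_available():
    from transformers import ErnieMModel              # gated on torch above
```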
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.34 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc ADDED
Binary file (5.42 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc ADDED
Binary file (29.5 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc ADDED
Binary file (14.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py ADDED
@@ -0,0 +1,117 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ErnieM model configuration"""
16
+ # Adapted from original paddlenlp repository.(https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py)
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import Dict
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+
24
+
25
+ ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
26
+ "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
27
+ "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
28
+ }
29
+
30
+
31
+ class ErnieMConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`ErnieMModel`]. It is used to instantiate a
34
+ Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
35
+ with the defaults will yield a similar configuration to that of the `Ernie-M`
36
+ [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.
37
+
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 250002):
44
+ Vocabulary size of `inputs_ids` in [`ErnieMModel`]. Also is the vocab size of token embedding matrix.
45
+ Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling
46
+ [`ErnieMModel`].
47
+ hidden_size (`int`, *optional*, defaults to 768):
48
+ Dimensionality of the embedding layer, encoder layers and pooler layer.
49
+ num_hidden_layers (`int`, *optional*, defaults to 12):
50
+ Number of hidden layers in the Transformer encoder.
51
+ num_attention_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
55
+ firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
56
+ intermediate_size is larger than hidden_size.
57
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
58
+ The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
59
+ supported activation functions are supported.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings and encoder.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target.
64
+ max_position_embeddings (`int`, *optional*, defaults to 514):
65
+ The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
66
+ of an input sequence.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the normal initializer for initializing all weight matrices. The index of padding
69
+ token in the token vocabulary.
70
+ pad_token_id (`int`, *optional*, defaults to 1):
71
+ Padding token id.
72
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
73
+ The epsilon used by the layer normalization layers.
74
+ classifier_dropout (`float`, *optional*):
75
+ The dropout ratio for the classification head.
76
+ act_dropout (`float`, *optional*, defaults to 0.0):
77
+ This dropout probability is used in `ErnieMEncoderLayer` after activation.
78
+
79
+ A normal_initializer initializes weight matrices as normal distributions. See
80
+ `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
81
+ """
82
+
83
+ model_type = "ernie_m"
84
+ attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
85
+
86
+ def __init__(
87
+ self,
88
+ vocab_size: int = 250002,
89
+ hidden_size: int = 768,
90
+ num_hidden_layers: int = 12,
91
+ num_attention_heads: int = 12,
92
+ intermediate_size: int = 3072,
93
+ hidden_act: str = "gelu",
94
+ hidden_dropout_prob: float = 0.1,
95
+ attention_probs_dropout_prob: float = 0.1,
96
+ max_position_embeddings: int = 514,
97
+ initializer_range: float = 0.02,
98
+ pad_token_id: int = 1,
99
+ layer_norm_eps: float = 1e-05,
100
+ classifier_dropout=None,
101
+ act_dropout=0.0,
102
+ **kwargs,
103
+ ):
104
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
105
+ self.vocab_size = vocab_size
106
+ self.hidden_size = hidden_size
107
+ self.num_hidden_layers = num_hidden_layers
108
+ self.num_attention_heads = num_attention_heads
109
+ self.intermediate_size = intermediate_size
110
+ self.hidden_act = hidden_act
111
+ self.hidden_dropout_prob = hidden_dropout_prob
112
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
113
+ self.max_position_embeddings = max_position_embeddings
114
+ self.initializer_range = initializer_range
115
+ self.layer_norm_eps = layer_norm_eps
116
+ self.classifier_dropout = classifier_dropout
117
+ self.act_dropout = act_dropout
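One detail worth noting in the configuration above is `attribute_map`, which aliases the paddlenlp-style names onto the Hugging Face ones; as far as I understand `PretrainedConfig`'s attribute handling, reading the aliased name returns the mapped attribute. A hedged sketch (not part of the commit):

```python
# Sketch: exercise the attribute_map declared above.
from transformers import ErnieMConfig

config = ErnieMConfig(classifier_dropout=0.1, num_labels=3)
print(config.dropout)      # 0.1 -- alias for classifier_dropout
print(config.num_classes)  # 3   -- alias for num_labels (a standard config field)
```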
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py ADDED
@@ -0,0 +1,1061 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ErnieM model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn, tensor
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
37
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
38
+ from .configuration_ernie_m import ErnieMConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "susnato/ernie-m-base_pytorch"
44
+ _CONFIG_FOR_DOC = "ErnieMConfig"
45
+ _TOKENIZER_FOR_DOC = "ErnieMTokenizer"
46
+
47
+ ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = [
48
+ "susnato/ernie-m-base_pytorch",
49
+ "susnato/ernie-m-large_pytorch",
50
+ # See all ErnieM models at https://huggingface.co/models?filter=ernie_m
51
+ ]
52
+
53
+
54
+ # Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings
55
+ class ErnieMEmbeddings(nn.Module):
56
+ """Construct the embeddings from word and position embeddings."""
57
+
58
+ def __init__(self, config):
59
+ super().__init__()
60
+ self.hidden_size = config.hidden_size
61
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
62
+ self.position_embeddings = nn.Embedding(
63
+ config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
64
+ )
65
+ self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
66
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
67
+ self.padding_idx = config.pad_token_id
68
+
69
+ def forward(
70
+ self,
71
+ input_ids: Optional[torch.LongTensor] = None,
72
+ position_ids: Optional[torch.LongTensor] = None,
73
+ inputs_embeds: Optional[torch.LongTensor] = None,
74
+ past_key_values_length: int = 0,
75
+ ) -> torch.Tensor:
76
+ if inputs_embeds is None:
77
+ inputs_embeds = self.word_embeddings(input_ids)
78
+ if position_ids is None:
79
+ input_shape = inputs_embeds.size()[:-1]
80
+ ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
81
+ seq_length = torch.cumsum(ones, dim=1)
82
+ position_ids = seq_length - ones
83
+
84
+ if past_key_values_length > 0:
85
+ position_ids = position_ids + past_key_values_length
86
+ # to mimic paddlenlp implementation
87
+ position_ids += 2
88
+ position_embeddings = self.position_embeddings(position_ids)
89
+ embeddings = inputs_embeds + position_embeddings
90
+ embeddings = self.layer_norm(embeddings)
91
+ embeddings = self.dropout(embeddings)
92
+
93
+ return embeddings
94
+
95
+
96
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ErnieM,self.value->self.v_proj,self.key->self.k_proj,self.query->self.q_proj
97
+ class ErnieMSelfAttention(nn.Module):
98
+ def __init__(self, config, position_embedding_type=None):
99
+ super().__init__()
100
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
101
+ raise ValueError(
102
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
103
+ f"heads ({config.num_attention_heads})"
104
+ )
105
+
106
+ self.num_attention_heads = config.num_attention_heads
107
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
108
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
109
+
110
+ self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
111
+ self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
112
+ self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)
113
+
114
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
115
+ self.position_embedding_type = position_embedding_type or getattr(
116
+ config, "position_embedding_type", "absolute"
117
+ )
118
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
119
+ self.max_position_embeddings = config.max_position_embeddings
120
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
121
+
122
+ self.is_decoder = config.is_decoder
123
+
124
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
125
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
126
+ x = x.view(new_x_shape)
127
+ return x.permute(0, 2, 1, 3)
128
+
129
+ def forward(
130
+ self,
131
+ hidden_states: torch.Tensor,
132
+ attention_mask: Optional[torch.FloatTensor] = None,
133
+ head_mask: Optional[torch.FloatTensor] = None,
134
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
135
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
136
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
137
+ output_attentions: Optional[bool] = False,
138
+ ) -> Tuple[torch.Tensor]:
139
+ mixed_query_layer = self.q_proj(hidden_states)
140
+
141
+ # If this is instantiated as a cross-attention module, the keys
142
+ # and values come from an encoder; the attention mask needs to be
143
+ # such that the encoder's padding tokens are not attended to.
144
+ is_cross_attention = encoder_hidden_states is not None
145
+
146
+ if is_cross_attention and past_key_value is not None:
147
+ # reuse k,v, cross_attentions
148
+ key_layer = past_key_value[0]
149
+ value_layer = past_key_value[1]
150
+ attention_mask = encoder_attention_mask
151
+ elif is_cross_attention:
152
+ key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
153
+ value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
154
+ attention_mask = encoder_attention_mask
155
+ elif past_key_value is not None:
156
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
157
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
158
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
159
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
160
+ else:
161
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
162
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
163
+
164
+ query_layer = self.transpose_for_scores(mixed_query_layer)
165
+
166
+ use_cache = past_key_value is not None
167
+ if self.is_decoder:
168
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
169
+ # Further calls to cross_attention layer can then reuse all cross-attention
170
+ # key/value_states (first "if" case)
171
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
172
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
173
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
174
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
175
+ past_key_value = (key_layer, value_layer)
176
+
177
+ # Take the dot product between "query" and "key" to get the raw attention scores.
178
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
179
+
180
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
181
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
182
+ if use_cache:
183
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
184
+ -1, 1
185
+ )
186
+ else:
187
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
188
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
189
+ distance = position_ids_l - position_ids_r
190
+
191
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
192
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
193
+
194
+ if self.position_embedding_type == "relative_key":
195
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
196
+ attention_scores = attention_scores + relative_position_scores
197
+ elif self.position_embedding_type == "relative_key_query":
198
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
199
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
200
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
201
+
202
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
203
+ if attention_mask is not None:
204
+ # Apply the attention mask (precomputed for all layers in ErnieMModel forward() function)
205
+ attention_scores = attention_scores + attention_mask
206
+
207
+ # Normalize the attention scores to probabilities.
208
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
209
+
210
+ # This is actually dropping out entire tokens to attend to, which might
211
+ # seem a bit unusual, but is taken from the original Transformer paper.
212
+ attention_probs = self.dropout(attention_probs)
213
+
214
+ # Mask heads if we want to
215
+ if head_mask is not None:
216
+ attention_probs = attention_probs * head_mask
217
+
218
+ context_layer = torch.matmul(attention_probs, value_layer)
219
+
220
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
221
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
222
+ context_layer = context_layer.view(new_context_layer_shape)
223
+
224
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
225
+
226
+ if self.is_decoder:
227
+ outputs = outputs + (past_key_value,)
228
+ return outputs
229
+
230
+
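The `relative_key` / `relative_key_query` branches in `ErnieMSelfAttention` add learned relative-position biases to the raw attention scores. A minimal, self-contained sketch of that distance lookup and the `bhld,lrd->bhlr` einsum, using toy sizes rather than the real config values:

import torch
import torch.nn as nn

# Toy sizes (assumptions for illustration only, not ErnieM defaults).
batch, heads, seq_len, head_size, max_pos = 2, 4, 6, 8, 16

distance_embedding = nn.Embedding(2 * max_pos - 1, head_size)
query_layer = torch.randn(batch, heads, seq_len, head_size)
key_layer = torch.randn(batch, heads, seq_len, head_size)

# Pairwise signed distances between query and key positions: shape (seq_len, seq_len).
position_ids_l = torch.arange(seq_len).view(-1, 1)
position_ids_r = torch.arange(seq_len).view(1, -1)
distance = position_ids_l - position_ids_r

# Shift into [0, 2 * max_pos - 2] so the distances can index the embedding table.
positional_embedding = distance_embedding(distance + max_pos - 1)

# Same einsum as the "relative_key" branch: one learned bias per (query position, key position).
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)

attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores + relative_position_scores
print(attention_scores.shape)  # torch.Size([2, 4, 6, 6])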
231
+ class ErnieMAttention(nn.Module):
232
+ def __init__(self, config, position_embedding_type=None):
233
+ super().__init__()
234
+ self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
235
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
236
+ self.pruned_heads = set()
237
+
238
+ def prune_heads(self, heads):
239
+ if len(heads) == 0:
240
+ return
241
+ heads, index = find_pruneable_heads_and_indices(
242
+ heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads
243
+ )
244
+
245
+ # Prune linear layers
246
+ self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
247
+ self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
248
+ self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
249
+ self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)
250
+
251
+ # Update hyper params and store pruned heads
252
+ self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
253
+ self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
254
+ self.pruned_heads = self.pruned_heads.union(heads)
255
+
256
+ def forward(
257
+ self,
258
+ hidden_states: torch.Tensor,
259
+ attention_mask: Optional[torch.FloatTensor] = None,
260
+ head_mask: Optional[torch.FloatTensor] = None,
261
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
262
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
263
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
264
+ output_attentions: Optional[bool] = False,
265
+ ) -> Tuple[torch.Tensor]:
266
+ self_outputs = self.self_attn(
267
+ hidden_states,
268
+ attention_mask,
269
+ head_mask,
270
+ encoder_hidden_states,
271
+ encoder_attention_mask,
272
+ past_key_value,
273
+ output_attentions,
274
+ )
275
+ attention_output = self.out_proj(self_outputs[0])
276
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
277
+ return outputs
278
+
279
+
280
+ class ErnieMEncoderLayer(nn.Module):
281
+ def __init__(self, config):
282
+ super().__init__()
283
+ # to mimic paddlenlp implementation
284
+ dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
285
+ act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout
286
+
287
+ self.self_attn = ErnieMAttention(config)
288
+ self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
289
+ self.dropout = nn.Dropout(act_dropout)
290
+ self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
291
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
292
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
293
+ self.dropout1 = nn.Dropout(dropout)
294
+ self.dropout2 = nn.Dropout(dropout)
295
+ if isinstance(config.hidden_act, str):
296
+ self.activation = ACT2FN[config.hidden_act]
297
+ else:
298
+ self.activation = config.hidden_act
299
+
300
+ def forward(
301
+ self,
302
+ hidden_states: torch.Tensor,
303
+ attention_mask: Optional[torch.FloatTensor] = None,
304
+ head_mask: Optional[torch.FloatTensor] = None,
305
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
306
+ output_attentions: Optional[bool] = True,
307
+ ):
308
+ residual = hidden_states
309
+ if output_attentions:
310
+ hidden_states, attention_opt_weights = self.self_attn(
311
+ hidden_states=hidden_states,
312
+ attention_mask=attention_mask,
313
+ head_mask=head_mask,
314
+ past_key_value=past_key_value,
315
+ output_attentions=output_attentions,
316
+ )
317
+
318
+ else:
319
+ hidden_states = self.self_attn(
320
+ hidden_states=hidden_states,
321
+ attention_mask=attention_mask,
322
+ head_mask=head_mask,
323
+ past_key_value=past_key_value,
324
+ output_attentions=output_attentions,
325
+ )
326
+ hidden_states = residual + self.dropout1(hidden_states)
327
+ hidden_states = self.norm1(hidden_states)
328
+ residual = hidden_states
329
+
330
+ hidden_states = self.linear1(hidden_states)
331
+ hidden_states = self.activation(hidden_states)
332
+ hidden_states = self.dropout(hidden_states)
333
+ hidden_states = self.linear2(hidden_states)
334
+ hidden_states = residual + self.dropout2(hidden_states)
335
+ hidden_states = self.norm2(hidden_states)
336
+
337
+ if output_attentions:
338
+ return hidden_states, attention_opt_weights
339
+ else:
340
+ return hidden_states
341
+
342
+
343
+ class ErnieMEncoder(nn.Module):
344
+ def __init__(self, config):
345
+ super().__init__()
346
+ self.config = config
347
+ self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])
348
+
349
+ def forward(
350
+ self,
351
+ input_embeds: torch.Tensor,
352
+ attention_mask: Optional[torch.FloatTensor] = None,
353
+ head_mask: Optional[torch.FloatTensor] = None,
354
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
355
+ output_attentions: Optional[bool] = False,
356
+ output_hidden_states: Optional[bool] = False,
357
+ return_dict: Optional[bool] = True,
358
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
359
+ hidden_states = () if output_hidden_states else None
360
+ attentions = () if output_attentions else None
361
+
362
+ output = input_embeds
363
+ if output_hidden_states:
364
+ hidden_states = hidden_states + (output,)
365
+ for i, layer in enumerate(self.layers):
366
+ layer_head_mask = head_mask[i] if head_mask is not None else None
367
+ past_key_value = past_key_values[i] if past_key_values is not None else None
368
+
369
+ output, opt_attn_weights = layer(
370
+ hidden_states=output,
371
+ attention_mask=attention_mask,
372
+ head_mask=layer_head_mask,
373
+ past_key_value=past_key_value,
374
+ )
375
+
376
+ if output_hidden_states:
377
+ hidden_states = hidden_states + (output,)
378
+ if output_attentions:
379
+ attentions = attentions + (opt_attn_weights,)
380
+
381
+ last_hidden_state = output
382
+ if not return_dict:
383
+ return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)
384
+
385
+ return BaseModelOutputWithPastAndCrossAttentions(
386
+ last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions
387
+ )
388
+
389
+
390
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ErnieM
391
+ class ErnieMPooler(nn.Module):
392
+ def __init__(self, config):
393
+ super().__init__()
394
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
395
+ self.activation = nn.Tanh()
396
+
397
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
398
+ # We "pool" the model by simply taking the hidden state corresponding
399
+ # to the first token.
400
+ first_token_tensor = hidden_states[:, 0]
401
+ pooled_output = self.dense(first_token_tensor)
402
+ pooled_output = self.activation(pooled_output)
403
+ return pooled_output
404
+
405
+
406
+ class ErnieMPreTrainedModel(PreTrainedModel):
407
+ """
408
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
409
+ models.
410
+ """
411
+
412
+ config_class = ErnieMConfig
413
+ base_model_prefix = "ernie_m"
414
+
415
+ def _init_weights(self, module):
416
+ """Initialize the weights"""
417
+ if isinstance(module, nn.Linear):
418
+ # Slightly different from the TF version which uses truncated_normal for initialization
419
+ # cf https://github.com/pytorch/pytorch/pull/5617
420
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
421
+ if module.bias is not None:
422
+ module.bias.data.zero_()
423
+ elif isinstance(module, nn.Embedding):
424
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
425
+ if module.padding_idx is not None:
426
+ module.weight.data[module.padding_idx].zero_()
427
+ elif isinstance(module, nn.LayerNorm):
428
+ module.bias.data.zero_()
429
+ module.weight.data.fill_(1.0)
430
+
431
+
432
+ ERNIE_M_START_DOCSTRING = r"""
433
+
434
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
435
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
436
+ etc.)
437
+
438
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
439
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
440
+ behavior.
441
+
442
+ Parameters:
443
+ config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.
444
+ Initializing with a config file does not load the weights associated with the model, only the
445
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
446
+ """
447
+
448
+ ERNIE_M_INPUTS_DOCSTRING = r"""
449
+ Args:
450
+ input_ids (`torch.LongTensor` of shape `({0})`):
451
+ Indices of input sequence tokens in the vocabulary.
452
+
453
+ Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
454
+ [`PreTrainedTokenizer.__call__`] for details.
455
+
456
+ [What are input IDs?](../glossary#input-ids)
457
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
458
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
459
+
460
+ - 1 for tokens that are **not masked**,
461
+ - 0 for tokens that are **masked**.
462
+
463
+ [What are attention masks?](../glossary#attention-mask)
464
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
465
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
466
+ config.max_position_embeddings - 1]`.
467
+
468
+ [What are position IDs?](../glossary#position-ids)
469
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
470
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
471
+
472
+ - 1 indicates the head is **not masked**,
473
+ - 0 indicates the head is **masked**.
474
+
475
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
476
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
477
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
478
+ model's internal embedding lookup matrix.
479
+ output_attentions (`bool`, *optional*):
480
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
481
+ tensors for more detail.
482
+ output_hidden_states (`bool`, *optional*):
483
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
484
+ more detail.
485
+ return_dict (`bool`, *optional*):
486
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
487
+ """
488
+
489
+
490
+ @add_start_docstrings(
491
+ "The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.",
492
+ ERNIE_M_START_DOCSTRING,
493
+ )
494
+ class ErnieMModel(ErnieMPreTrainedModel):
495
+ def __init__(self, config, add_pooling_layer=True):
496
+ super(ErnieMModel, self).__init__(config)
497
+ self.initializer_range = config.initializer_range
498
+ self.embeddings = ErnieMEmbeddings(config)
499
+ self.encoder = ErnieMEncoder(config)
500
+ self.pooler = ErnieMPooler(config) if add_pooling_layer else None
501
+ self.post_init()
502
+
503
+ def get_input_embeddings(self):
504
+ return self.embeddings.word_embeddings
505
+
506
+ def set_input_embeddings(self, value):
507
+ self.embeddings.word_embeddings = value
508
+
509
+ def _prune_heads(self, heads_to_prune):
510
+ """
511
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
512
+ class PreTrainedModel
513
+ """
514
+ for layer, heads in heads_to_prune.items():
515
+ self.encoder.layers[layer].self_attn.prune_heads(heads)
516
+
517
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
518
+ @add_code_sample_docstrings(
519
+ processor_class=_TOKENIZER_FOR_DOC,
520
+ checkpoint=_CHECKPOINT_FOR_DOC,
521
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
522
+ config_class=_CONFIG_FOR_DOC,
523
+ )
524
+ def forward(
525
+ self,
526
+ input_ids: Optional[tensor] = None,
527
+ position_ids: Optional[tensor] = None,
528
+ attention_mask: Optional[tensor] = None,
529
+ head_mask: Optional[tensor] = None,
530
+ inputs_embeds: Optional[tensor] = None,
531
+ past_key_values: Optional[Tuple[Tuple[tensor]]] = None,
532
+ use_cache: Optional[bool] = None,
533
+ output_hidden_states: Optional[bool] = None,
534
+ output_attentions: Optional[bool] = None,
535
+ return_dict: Optional[bool] = None,
536
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
537
+ if input_ids is not None and inputs_embeds is not None:
538
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")
539
+
540
+ # init the default bool value
541
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
542
+ output_hidden_states = (
543
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
544
+ )
545
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
546
+
547
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
548
+
549
+ past_key_values_length = 0
550
+ if past_key_values is not None:
551
+ past_key_values_length = past_key_values[0][0].shape[2]
552
+
553
+ # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel
554
+ if attention_mask is None:
555
+ attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
556
+ attention_mask *= torch.finfo(attention_mask.dtype).min
557
+ if past_key_values is not None:
558
+ batch_size = past_key_values[0][0].shape[0]
559
+ past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
560
+ attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
561
+ # For 2D attention_mask from tokenizer
562
+ elif attention_mask.ndim == 2:
563
+ attention_mask = attention_mask.to(torch.float32)
564
+ attention_mask = 1.0 - attention_mask
565
+ attention_mask *= torch.finfo(attention_mask.dtype).min
566
+
567
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
568
+
569
+ embedding_output = self.embeddings(
570
+ input_ids=input_ids,
571
+ position_ids=position_ids,
572
+ inputs_embeds=inputs_embeds,
573
+ past_key_values_length=past_key_values_length,
574
+ )
575
+ encoder_outputs = self.encoder(
576
+ embedding_output,
577
+ attention_mask=extended_attention_mask,
578
+ head_mask=head_mask,
579
+ past_key_values=past_key_values,
580
+ output_attentions=output_attentions,
581
+ output_hidden_states=output_hidden_states,
582
+ return_dict=return_dict,
583
+ )
584
+
585
+ if not return_dict:
586
+ sequence_output = encoder_outputs[0]
587
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
588
+ return (sequence_output, pooler_output) + encoder_outputs[1:]
589
+
590
+ sequence_output = encoder_outputs["last_hidden_state"]
591
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
592
+ hidden_states = None if not output_hidden_states else encoder_outputs["hidden_states"]
593
+ attentions = None if not output_attentions else encoder_outputs["attentions"]
594
+
595
+ return BaseModelOutputWithPoolingAndCrossAttentions(
596
+ last_hidden_state=sequence_output,
597
+ pooler_output=pooler_output,
598
+ hidden_states=hidden_states,
599
+ attentions=attentions,
600
+ )
601
+
602
+
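`ErnieMModel.forward` converts a 2D padding mask from the tokenizer (1 for real tokens, 0 for padding) into an additive float mask of large negative values that is later added to the attention scores. A hedged standalone sketch of just that transformation, assuming only `torch`:

import torch

# 2D mask from a tokenizer: 1 = real token, 0 = padding (toy example).
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

# Same steps as the `attention_mask.ndim == 2` branch above.
mask = attention_mask.to(torch.float32)
mask = 1.0 - mask                          # 1.0 where attention must be blocked
mask = mask * torch.finfo(mask.dtype).min  # large negative bias for masked slots

# Broadcastable shape (batch, 1, 1, seq_len); added to the raw attention scores.
extended_attention_mask = mask.unsqueeze(1).unsqueeze(1)
print(extended_attention_mask.shape)  # torch.Size([2, 1, 1, 5])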
603
+ @add_start_docstrings(
604
+ """ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of
605
+ the pooled output) e.g. for GLUE tasks.""",
606
+ ERNIE_M_START_DOCSTRING,
607
+ )
608
+ class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
609
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->ErnieM,bert->ernie_m
610
+ def __init__(self, config):
611
+ super().__init__(config)
612
+ self.num_labels = config.num_labels
613
+ self.config = config
614
+
615
+ self.ernie_m = ErnieMModel(config)
616
+ classifier_dropout = (
617
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
618
+ )
619
+ self.dropout = nn.Dropout(classifier_dropout)
620
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
621
+
622
+ # Initialize weights and apply final processing
623
+ self.post_init()
624
+
625
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
626
+ @add_code_sample_docstrings(
627
+ processor_class=_TOKENIZER_FOR_DOC,
628
+ checkpoint=_CHECKPOINT_FOR_DOC,
629
+ output_type=SequenceClassifierOutput,
630
+ config_class=_CONFIG_FOR_DOC,
631
+ )
632
+ def forward(
633
+ self,
634
+ input_ids: Optional[torch.Tensor] = None,
635
+ attention_mask: Optional[torch.Tensor] = None,
636
+ position_ids: Optional[torch.Tensor] = None,
637
+ head_mask: Optional[torch.Tensor] = None,
638
+ inputs_embeds: Optional[torch.Tensor] = None,
639
+ past_key_values: Optional[List[torch.Tensor]] = None,
640
+ use_cache: Optional[bool] = None,
641
+ output_hidden_states: Optional[bool] = None,
642
+ output_attentions: Optional[bool] = None,
643
+ return_dict: Optional[bool] = True,
644
+ labels: Optional[torch.Tensor] = None,
645
+ ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
646
+ r"""
647
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
648
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
649
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
650
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
651
+ """
652
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
653
+
654
+ outputs = self.ernie_m(
655
+ input_ids,
656
+ attention_mask=attention_mask,
657
+ position_ids=position_ids,
658
+ head_mask=head_mask,
659
+ inputs_embeds=inputs_embeds,
660
+ past_key_values=past_key_values,
661
+ output_hidden_states=output_hidden_states,
662
+ output_attentions=output_attentions,
663
+ return_dict=return_dict,
664
+ )
665
+
666
+ pooled_output = outputs[1]
667
+
668
+ pooled_output = self.dropout(pooled_output)
669
+ logits = self.classifier(pooled_output)
670
+
671
+ loss = None
672
+ if labels is not None:
673
+ if self.config.problem_type is None:
674
+ if self.num_labels == 1:
675
+ self.config.problem_type = "regression"
676
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
677
+ self.config.problem_type = "single_label_classification"
678
+ else:
679
+ self.config.problem_type = "multi_label_classification"
680
+
681
+ if self.config.problem_type == "regression":
682
+ loss_fct = MSELoss()
683
+ if self.num_labels == 1:
684
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
685
+ else:
686
+ loss = loss_fct(logits, labels)
687
+ elif self.config.problem_type == "single_label_classification":
688
+ loss_fct = CrossEntropyLoss()
689
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
690
+ elif self.config.problem_type == "multi_label_classification":
691
+ loss_fct = BCEWithLogitsLoss()
692
+ loss = loss_fct(logits, labels)
693
+ if not return_dict:
694
+ output = (logits,) + outputs[2:]
695
+ return ((loss,) + output) if loss is not None else output
696
+
697
+ return SequenceClassifierOutput(
698
+ loss=loss,
699
+ logits=logits,
700
+ hidden_states=outputs.hidden_states,
701
+ attentions=outputs.attentions,
702
+ )
703
+
704
+
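The loss branch in `ErnieMForSequenceClassification` infers `problem_type` from `num_labels` and the label dtype. A standalone sketch of the same decision rule; the function name and toy shapes are illustrative assumptions, not part of the model:

import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def sequence_classification_loss(logits, labels, num_labels):
    # Same heuristic as above: 1 label -> regression, integer labels -> single-label
    # classification, float multi-hot labels -> multi-label classification.
    if num_labels == 1:
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())

logits = torch.randn(4, 3)
print(sequence_classification_loss(logits, torch.tensor([0, 2, 1, 0]), num_labels=3))
print(sequence_classification_loss(logits, torch.randint(0, 2, (4, 3)).float(), num_labels=3))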
705
+ @add_start_docstrings(
706
+ """ErnieM Model with a multiple choice classification head on top (a linear layer on top of
707
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
708
+ ERNIE_M_START_DOCSTRING,
709
+ )
710
+ class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
711
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->ErnieM,bert->ernie_m
712
+ def __init__(self, config):
713
+ super().__init__(config)
714
+
715
+ self.ernie_m = ErnieMModel(config)
716
+ classifier_dropout = (
717
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
718
+ )
719
+ self.dropout = nn.Dropout(classifier_dropout)
720
+ self.classifier = nn.Linear(config.hidden_size, 1)
721
+
722
+ # Initialize weights and apply final processing
723
+ self.post_init()
724
+
725
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
726
+ @add_code_sample_docstrings(
727
+ checkpoint=_CHECKPOINT_FOR_DOC,
728
+ output_type=MultipleChoiceModelOutput,
729
+ config_class=_CONFIG_FOR_DOC,
730
+ )
731
+ def forward(
732
+ self,
733
+ input_ids: Optional[torch.Tensor] = None,
734
+ attention_mask: Optional[torch.Tensor] = None,
735
+ position_ids: Optional[torch.Tensor] = None,
736
+ head_mask: Optional[torch.Tensor] = None,
737
+ inputs_embeds: Optional[torch.Tensor] = None,
738
+ labels: Optional[torch.Tensor] = None,
739
+ output_attentions: Optional[bool] = None,
740
+ output_hidden_states: Optional[bool] = None,
741
+ return_dict: Optional[bool] = True,
742
+ ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
743
+ r"""
744
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
745
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
746
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
747
+ `input_ids` above)
748
+ """
749
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
750
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
751
+
752
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
753
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
754
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
755
+ inputs_embeds = (
756
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
757
+ if inputs_embeds is not None
758
+ else None
759
+ )
760
+
761
+ outputs = self.ernie_m(
762
+ input_ids,
763
+ attention_mask=attention_mask,
764
+ position_ids=position_ids,
765
+ head_mask=head_mask,
766
+ inputs_embeds=inputs_embeds,
767
+ output_attentions=output_attentions,
768
+ output_hidden_states=output_hidden_states,
769
+ return_dict=return_dict,
770
+ )
771
+
772
+ pooled_output = outputs[1]
773
+
774
+ pooled_output = self.dropout(pooled_output)
775
+ logits = self.classifier(pooled_output)
776
+ reshaped_logits = logits.view(-1, num_choices)
777
+
778
+ loss = None
779
+ if labels is not None:
780
+ loss_fct = CrossEntropyLoss()
781
+ loss = loss_fct(reshaped_logits, labels)
782
+
783
+ if not return_dict:
784
+ output = (reshaped_logits,) + outputs[2:]
785
+ return ((loss,) + output) if loss is not None else output
786
+
787
+ return MultipleChoiceModelOutput(
788
+ loss=loss,
789
+ logits=reshaped_logits,
790
+ hidden_states=outputs.hidden_states,
791
+ attentions=outputs.attentions,
792
+ )
793
+
794
+
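`ErnieMForMultipleChoice` flattens `(batch, num_choices, seq_len)` inputs into `(batch * num_choices, seq_len)`, scores each choice with a one-logit classifier, and reshapes back before applying cross-entropy. A toy sketch of that reshaping, with the encoder's pooled output replaced by a random stand-in:

import torch
import torch.nn as nn

batch, num_choices, hidden = 2, 4, 8
classifier = nn.Linear(hidden, 1)

# Stand-in for the pooled output of the flattened (batch * num_choices) forward pass.
pooled_output = torch.randn(batch * num_choices, hidden)

logits = classifier(pooled_output)              # (batch * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices)  # (batch, num_choices)

labels = torch.tensor([1, 3])                   # index of the correct choice per example
loss = nn.CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss.item())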
795
+ @add_start_docstrings(
796
+ """ErnieM Model with a token classification head on top (a linear layer on top of
797
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
798
+ ERNIE_M_START_DOCSTRING,
799
+ )
800
+ class ErnieMForTokenClassification(ErnieMPreTrainedModel):
801
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->ErnieM,bert->ernie_m
802
+ def __init__(self, config):
803
+ super().__init__(config)
804
+ self.num_labels = config.num_labels
805
+
806
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
807
+ classifier_dropout = (
808
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
809
+ )
810
+ self.dropout = nn.Dropout(classifier_dropout)
811
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
812
+
813
+ # Initialize weights and apply final processing
814
+ self.post_init()
815
+
816
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
817
+ @add_code_sample_docstrings(
818
+ processor_class=_TOKENIZER_FOR_DOC,
819
+ checkpoint=_CHECKPOINT_FOR_DOC,
820
+ output_type=TokenClassifierOutput,
821
+ config_class=_CONFIG_FOR_DOC,
822
+ )
823
+ def forward(
824
+ self,
825
+ input_ids: Optional[torch.Tensor] = None,
826
+ attention_mask: Optional[torch.Tensor] = None,
827
+ position_ids: Optional[torch.Tensor] = None,
828
+ head_mask: Optional[torch.Tensor] = None,
829
+ inputs_embeds: Optional[torch.Tensor] = None,
830
+ past_key_values: Optional[List[torch.Tensor]] = None,
831
+ output_hidden_states: Optional[bool] = None,
832
+ output_attentions: Optional[bool] = None,
833
+ return_dict: Optional[bool] = True,
834
+ labels: Optional[torch.Tensor] = None,
835
+ ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
836
+ r"""
837
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
838
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
839
+ """
840
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
841
+
842
+ outputs = self.ernie_m(
843
+ input_ids,
844
+ attention_mask=attention_mask,
845
+ position_ids=position_ids,
846
+ head_mask=head_mask,
847
+ inputs_embeds=inputs_embeds,
848
+ past_key_values=past_key_values,
849
+ output_attentions=output_attentions,
850
+ output_hidden_states=output_hidden_states,
851
+ return_dict=return_dict,
852
+ )
853
+
854
+ sequence_output = outputs[0]
855
+
856
+ sequence_output = self.dropout(sequence_output)
857
+ logits = self.classifier(sequence_output)
858
+
859
+ loss = None
860
+ if labels is not None:
861
+ loss_fct = CrossEntropyLoss()
862
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
863
+
864
+ if not return_dict:
865
+ output = (logits,) + outputs[2:]
866
+ return ((loss,) + output) if loss is not None else output
867
+
868
+ return TokenClassifierOutput(
869
+ loss=loss,
870
+ logits=logits,
871
+ hidden_states=outputs.hidden_states,
872
+ attentions=outputs.attentions,
873
+ )
874
+
875
+
876
+ @add_start_docstrings(
877
+ """ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
878
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
879
+ ERNIE_M_START_DOCSTRING,
880
+ )
881
+ class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
882
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->ErnieM,bert->ernie_m
883
+ def __init__(self, config):
884
+ super().__init__(config)
885
+ self.num_labels = config.num_labels
886
+
887
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
888
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
889
+
890
+ # Initialize weights and apply final processing
891
+ self.post_init()
892
+
893
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
894
+ @add_code_sample_docstrings(
895
+ processor_class=_TOKENIZER_FOR_DOC,
896
+ checkpoint=_CHECKPOINT_FOR_DOC,
897
+ output_type=QuestionAnsweringModelOutput,
898
+ config_class=_CONFIG_FOR_DOC,
899
+ )
900
+ def forward(
901
+ self,
902
+ input_ids: Optional[torch.Tensor] = None,
903
+ attention_mask: Optional[torch.Tensor] = None,
904
+ position_ids: Optional[torch.Tensor] = None,
905
+ head_mask: Optional[torch.Tensor] = None,
906
+ inputs_embeds: Optional[torch.Tensor] = None,
907
+ start_positions: Optional[torch.Tensor] = None,
908
+ end_positions: Optional[torch.Tensor] = None,
909
+ output_attentions: Optional[bool] = None,
910
+ output_hidden_states: Optional[bool] = None,
911
+ return_dict: Optional[bool] = True,
912
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
913
+ r"""
914
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
915
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
916
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
917
+ are not taken into account for computing the loss.
918
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
919
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
920
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
921
+ are not taken into account for computing the loss.
922
+ """
923
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
924
+
925
+ outputs = self.ernie_m(
926
+ input_ids,
927
+ attention_mask=attention_mask,
928
+ position_ids=position_ids,
929
+ head_mask=head_mask,
930
+ inputs_embeds=inputs_embeds,
931
+ output_attentions=output_attentions,
932
+ output_hidden_states=output_hidden_states,
933
+ return_dict=return_dict,
934
+ )
935
+
936
+ sequence_output = outputs[0]
937
+
938
+ logits = self.qa_outputs(sequence_output)
939
+ start_logits, end_logits = logits.split(1, dim=-1)
940
+ start_logits = start_logits.squeeze(-1).contiguous()
941
+ end_logits = end_logits.squeeze(-1).contiguous()
942
+
943
+ total_loss = None
944
+ if start_positions is not None and end_positions is not None:
945
+ # If we are on multi-GPU, splitting can add an extra dimension; squeeze it away
946
+ if len(start_positions.size()) > 1:
947
+ start_positions = start_positions.squeeze(-1)
948
+ if len(end_positions.size()) > 1:
949
+ end_positions = end_positions.squeeze(-1)
950
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
951
+ ignored_index = start_logits.size(1)
952
+ start_positions = start_positions.clamp(0, ignored_index)
953
+ end_positions = end_positions.clamp(0, ignored_index)
954
+
955
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
956
+ start_loss = loss_fct(start_logits, start_positions)
957
+ end_loss = loss_fct(end_logits, end_positions)
958
+ total_loss = (start_loss + end_loss) / 2
959
+
960
+ if not return_dict:
961
+ output = (start_logits, end_logits) + outputs[2:]
962
+ return ((total_loss,) + output) if total_loss is not None else output
963
+
964
+ return QuestionAnsweringModelOutput(
965
+ loss=total_loss,
966
+ start_logits=start_logits,
967
+ end_logits=end_logits,
968
+ hidden_states=outputs.hidden_states,
969
+ attentions=outputs.attentions,
970
+ )
971
+
972
+
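The question-answering head splits a two-logit projection into start/end logits and averages two cross-entropy losses, clamping out-of-range label positions to an ignored index. A minimal sketch of that loss with toy tensors (all values here are made up for illustration):

import torch
from torch.nn import CrossEntropyLoss

batch, seq_len = 2, 10
start_logits = torch.randn(batch, seq_len)
end_logits = torch.randn(batch, seq_len)
start_positions = torch.tensor([3, 42])   # 42 is deliberately outside the sequence
end_positions = torch.tensor([5, 7])

# Positions outside the sequence are clamped to `ignored_index` and skipped by the loss.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss.item())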
973
+ @add_start_docstrings(
974
+ """ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to
975
+ compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""",
976
+ ERNIE_M_START_DOCSTRING,
977
+ )
978
+ # Copied from paddlenlp.transformers.ernie_m.modeling.UIEM
979
+ class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
980
+ def __init__(self, config):
981
+ super(ErnieMForInformationExtraction, self).__init__(config)
982
+ self.ernie_m = ErnieMModel(config)
983
+ self.linear_start = nn.Linear(config.hidden_size, 1)
984
+ self.linear_end = nn.Linear(config.hidden_size, 1)
985
+ self.sigmoid = nn.Sigmoid()
986
+ self.post_init()
987
+
988
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
989
+ def forward(
990
+ self,
991
+ input_ids: Optional[torch.Tensor] = None,
992
+ attention_mask: Optional[torch.Tensor] = None,
993
+ position_ids: Optional[torch.Tensor] = None,
994
+ head_mask: Optional[torch.Tensor] = None,
995
+ inputs_embeds: Optional[torch.Tensor] = None,
996
+ start_positions: Optional[torch.Tensor] = None,
997
+ end_positions: Optional[torch.Tensor] = None,
998
+ output_attentions: Optional[bool] = None,
999
+ output_hidden_states: Optional[bool] = None,
1000
+ return_dict: Optional[bool] = True,
1001
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
1002
+ r"""
1003
+ start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1004
+ Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
1005
+ not taken into account for computing the loss.
1006
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1007
+ Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are not
1008
+ taken into account for computing the loss.
1009
+ """
1010
+
1011
+ result = self.ernie_m(
1012
+ input_ids,
1013
+ attention_mask=attention_mask,
1014
+ position_ids=position_ids,
1015
+ head_mask=head_mask,
1016
+ inputs_embeds=inputs_embeds,
1017
+ output_attentions=output_attentions,
1018
+ output_hidden_states=output_hidden_states,
1019
+ return_dict=return_dict,
1020
+ )
1021
+ if return_dict:
1022
+ sequence_output = result.last_hidden_state
1023
+ elif not return_dict:
1024
+ sequence_output = result[0]
1025
+
1026
+ start_logits = self.linear_start(sequence_output)
1027
+ start_logits = start_logits.squeeze(-1)
1028
+ end_logits = self.linear_end(sequence_output)
1029
+ end_logits = end_logits.squeeze(-1)
1030
+
1031
+ total_loss = None
1032
+ if start_positions is not None and end_positions is not None:
1033
+ # If we are on multi-GPU, splitting can add an extra dimension; squeeze it away
1034
+ if len(start_positions.size()) > 1:
1035
+ start_positions = start_positions.squeeze(-1)
1036
+ if len(end_positions.size()) > 1:
1037
+ end_positions = end_positions.squeeze(-1)
1038
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1039
+ ignored_index = start_logits.size(1)
1040
+ start_positions = start_positions.clamp(0, ignored_index)
1041
+ end_positions = end_positions.clamp(0, ignored_index)
1042
+
1043
+ loss_fct = BCEWithLogitsLoss()
1044
+ start_loss = loss_fct(start_logits, start_positions)
1045
+ end_loss = loss_fct(end_logits, end_positions)
1046
+ total_loss = (start_loss + end_loss) / 2
1047
+
1048
+ if not return_dict:
1049
+ return tuple(
1050
+ i
1051
+ for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions]
1052
+ if i is not None
1053
+ )
1054
+
1055
+ return QuestionAnsweringModelOutput(
1056
+ loss=total_loss,
1057
+ start_logits=start_logits,
1058
+ end_logits=end_logits,
1059
+ hidden_states=result.hidden_states,
1060
+ attentions=result.attentions,
1061
+ )
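Unlike the span-classification head above, `ErnieMForInformationExtraction` scores every token independently and is trained with `BCEWithLogitsLoss` against per-token float targets, which corresponds to the `start_prob`/`end_prob` described in its docstring once a sigmoid is applied. A hedged, standalone sketch of that objective with toy shapes:

import torch
import torch.nn as nn

batch, seq_len, hidden = 2, 6, 8
sequence_output = torch.randn(batch, seq_len, hidden)

linear_start = nn.Linear(hidden, 1)
linear_end = nn.Linear(hidden, 1)

start_logits = linear_start(sequence_output).squeeze(-1)  # (batch, seq_len)
end_logits = linear_end(sequence_output).squeeze(-1)

# Per-token binary targets: 1.0 where a span starts / ends, 0.0 elsewhere (toy labels).
start_positions = torch.zeros(batch, seq_len)
end_positions = torch.zeros(batch, seq_len)
start_positions[0, 1] = 1.0
end_positions[0, 3] = 1.0

loss_fct = nn.BCEWithLogitsLoss()
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
start_prob = torch.sigmoid(start_logits)  # per-token start probability, as in the class docstring
print(start_prob.shape, total_loss.item())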
env-llmeval/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py ADDED
@@ -0,0 +1,429 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Ernie-M."""
16
+
17
+ import io
18
+ import os
19
+ import unicodedata
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ SPIECE_UNDERLINE = "▁"
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
33
+
34
+ RESOURCE_FILES_NAMES = {
35
+ "sentencepiece_model_file": "sentencepiece.bpe.model",
36
+ "vocab_file": "vocab.txt",
37
+ }
38
+
39
+ PRETRAINED_VOCAB_FILES_MAP = {
40
+ "vocab_file": {
41
+ "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
42
+ "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
43
+ },
44
+ "sentencepiece_model_file": {
45
+ "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
46
+ "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
47
+ },
48
+ }
49
+
50
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
51
+ "ernie-m-base": 514,
52
+ "ernie-m-large": 514,
53
+ }
54
+
55
+ PRETRAINED_INIT_CONFIGURATION = {
56
+ "ernie-m-base": {"do_lower_case": False},
57
+ "ernie-m-large": {"do_lower_case": False},
58
+ }
59
+
60
+
61
+ # Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
62
+ class ErnieMTokenizer(PreTrainedTokenizer):
63
+ r"""
64
+ Constructs an Ernie-M tokenizer. It uses the `sentencepiece` library to split words into sub-words.
65
+
66
+ Args:
67
+ sentencepiece_model_file (`str`):
68
+ The file path of sentencepiece model.
69
+ vocab_file (`str`, *optional*):
70
+ The file path of the vocabulary.
71
+ do_lower_case (`bool`, *optional*, defaults to `False`):
72
+ Whether or not to lowercase the input when tokenizing.
73
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
74
+ A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
75
+ `unk_token` in order to be converted to an ID.
76
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
77
+ A special token separating two different sentences in the same input.
78
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
79
+ A special token used to make arrays of tokens the same size for batching purposes.
80
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
81
+ A special token used for sequence classification. It is the last token of the sequence when built with
82
+ special tokens.
83
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
84
+ A special token representing a masked token. This is the token used in the masked language modeling task
85
+ which the model tries to predict the original unmasked ones.
86
+ """
87
+
88
+ # Ernie-M model doesn't have token_type embedding.
89
+ model_input_names: List[str] = ["input_ids"]
90
+
91
+ vocab_files_names = VOCAB_FILES_NAMES
92
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
93
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
94
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
95
+ resource_files_names = RESOURCE_FILES_NAMES
96
+
97
+ def __init__(
98
+ self,
99
+ sentencepiece_model_ckpt,
100
+ vocab_file=None,
101
+ do_lower_case=False,
102
+ encoding="utf8",
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
109
+ **kwargs,
110
+ ) -> None:
111
+ # The mask token behaves like a normal word, i.e. it includes the space before it and
112
+ # is included in the raw text; there should be a match in a non-normalized sentence.
113
+
114
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
115
+
116
+ self.do_lower_case = do_lower_case
117
+ self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
118
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
119
+ self.sp_model.Load(sentencepiece_model_ckpt)
120
+
121
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
122
+ if vocab_file is not None:
123
+ self.vocab = self.load_vocab(filepath=vocab_file)
124
+ else:
125
+ self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
126
+ self.reverse_vocab = {v: k for k, v in self.vocab.items()}
127
+
128
+ super().__init__(
129
+ do_lower_case=do_lower_case,
130
+ unk_token=unk_token,
131
+ sep_token=sep_token,
132
+ pad_token=pad_token,
133
+ cls_token=cls_token,
134
+ mask_token=mask_token,
135
+ vocab_file=vocab_file,
136
+ encoding=encoding,
137
+ sp_model_kwargs=self.sp_model_kwargs,
138
+ **kwargs,
139
+ )
140
+
141
+ def get_offset_mapping(self, text):
142
+ if text is None:
143
+ return None
144
+
145
+ split_tokens = self.tokenize(text)
146
+ normalized_text, char_mapping = "", []
147
+
148
+ for i, ch in enumerate(text):
149
+ if ch in self.SP_CHAR_MAPPING:
150
+ ch = self.SP_CHAR_MAPPING.get(ch)
151
+ else:
152
+ ch = unicodedata.normalize("NFKC", ch)
153
+ if self.is_whitespace(ch):
154
+ continue
155
+ normalized_text += ch
156
+ char_mapping.extend([i] * len(ch))
157
+
158
+ text, token_mapping, offset = normalized_text, [], 0
159
+
160
+ if self.do_lower_case:
161
+ text = text.lower()
162
+
163
+ for token in split_tokens:
164
+ if token[:1] == "▁":
165
+ token = token[1:]
166
+ start = text[offset:].index(token) + offset
167
+ end = start + len(token)
168
+
169
+ token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
170
+ offset = end
171
+ return token_mapping
172
+
173
+ @property
174
+ def vocab_size(self):
175
+ return len(self.vocab)
176
+
177
+ def get_vocab(self):
178
+ return dict(self.vocab, **self.added_tokens_encoder)
179
+
180
+ def __getstate__(self):
181
+ state = self.__dict__.copy()
182
+ state["sp_model"] = None
183
+ return state
184
+
185
+ def __setstate__(self, d):
186
+ self.__dict__ = d
187
+
188
+ # for backward compatibility
189
+ if not hasattr(self, "sp_model_kwargs"):
190
+ self.sp_model_kwargs = {}
191
+
192
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
193
+ self.sp_model.Load(self.sentencepiece_model_ckpt)
194
+
195
+ def clean_text(self, text):
196
+ """Performs invalid character removal and whitespace cleanup on text."""
197
+ return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
198
+
199
+ def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
200
+ """Tokenize a string."""
201
+
202
+ if self.sp_model_kwargs.get("enable_sampling") is True:
203
+ enable_sampling = True
204
+ if self.sp_model_kwargs.get("alpha") is not None:
205
+ alpha = self.sp_model_kwargs.get("alpha")
206
+ if self.sp_model_kwargs.get("nbest_size") is not None:
207
+ nbest_size = self.sp_model_kwargs.get("nbest_size")
208
+
209
+ if not enable_sampling:
210
+ pieces = self.sp_model.EncodeAsPieces(text)
211
+ else:
212
+ pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
213
+ new_pieces = []
214
+ for pi, piece in enumerate(pieces):
215
+ if piece == SPIECE_UNDERLINE:
216
+ if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
217
+ new_pieces.append(SPIECE_UNDERLINE)
218
+ continue
219
+ else:
220
+ continue
221
+ lst_i = 0
222
+ for i, chunk in enumerate(piece):
223
+ if chunk == SPIECE_UNDERLINE:
224
+ continue
225
+ if self.is_ch_char(chunk) or self.is_punct(chunk):
226
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
227
+ new_pieces.append(piece[lst_i:i])
228
+ new_pieces.append(chunk)
229
+ lst_i = i + 1
230
+ elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
231
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
232
+ new_pieces.append(piece[lst_i:i])
233
+ lst_i = i
234
+ elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
235
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
236
+ new_pieces.append(piece[lst_i:i])
237
+ lst_i = i
238
+ if len(piece) > lst_i:
239
+ new_pieces.append(piece[lst_i:])
240
+ return new_pieces
241
+
242
+ def convert_tokens_to_string(self, tokens):
243
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
244
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
245
+ return out_string
246
+
247
+ def convert_ids_to_string(self, ids):
248
+ """
249
+ Converts a sequence of ids (integers) into a single string by first converting them to tokens.
250
+ """
251
+ tokens = self.convert_ids_to_tokens(ids)
252
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
253
+ return out_string
254
+
255
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
256
+ def _convert_token_to_id(self, token):
257
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
258
+
259
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
260
+ def _convert_id_to_token(self, index):
261
+ """Converts an index (integer) in a token (str) using the vocab."""
262
+ return self.reverse_vocab.get(index, self.unk_token)
263
+
264
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
265
+ r"""
266
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
267
+ adding special tokens. An ErnieM sequence has the following format:
268
+
269
+ - single sequence: `[CLS] X [SEP]`
270
+ - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
271
+
272
+ Args:
273
+ token_ids_0 (`List[int]`):
274
+ List of IDs to which the special tokens will be added.
275
+ token_ids_1 (`List[int]`, *optional*):
276
+ Optional second list of IDs for sequence pairs.
277
+ Returns:
278
+ `List[int]`: List of input IDs with the appropriate special tokens.
279
+ """
280
+ if token_ids_1 is None:
281
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
282
+ _cls = [self.cls_token_id]
283
+ _sep = [self.sep_token_id]
284
+ return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
285
+
286
+ def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
287
+ r"""
288
+ Build an offset map from a pair of offset maps by concatenating and adding offsets of special tokens. An Ernie-M
289
+ offset_mapping has the following format:
290
+
291
+ - single sequence: `(0,0) X (0,0)`
292
+ - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
293
+
294
+ Args:
295
+ offset_mapping_ids_0 (`List[tuple]`):
296
+ List of char offsets to which the special tokens will be added.
297
+ offset_mapping_ids_1 (`List[tuple]`, *optional*):
298
+ Optional second list of wordpiece offsets for offset mapping pairs.
299
+ Returns:
300
+ `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
301
+ """
302
+ if offset_mapping_1 is None:
303
+ return [(0, 0)] + offset_mapping_0 + [(0, 0)]
304
+
305
+ return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
306
+
307
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
308
+ r"""
309
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
310
+ special tokens using the tokenizer `encode` method.
311
+
312
+ Args:
313
+ token_ids_0 (`List[int]`):
314
+ List of ids of the first sequence.
315
+ token_ids_1 (`List[int]`, *optional*):
316
+ Optional second list of IDs for sequence pairs.
317
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
318
+ Whether or not the token list is already formatted with special tokens for the model.
319
+ Returns:
320
+ `List[int]`:
321
+ The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
322
+ """
323
+
324
+ if already_has_special_tokens:
325
+ if token_ids_1 is not None:
326
+ raise ValueError(
327
+ "You should not supply a second sequence if the provided sequence of "
328
+ "ids is already formatted with special tokens for the model."
329
+ )
330
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
331
+
332
+ if token_ids_1 is not None:
333
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
334
+ return [1] + ([0] * len(token_ids_0)) + [1]
335
+
336
+ def create_token_type_ids_from_sequences(
337
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
338
+ ) -> List[int]:
339
+ """
340
+ Create the token type IDs corresponding to the sequences passed. [What are token type
341
+ IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
342
+ building those.
343
+
344
+ Args:
345
+ token_ids_0 (`List[int]`):
346
+ The first tokenized sequence.
347
+ token_ids_1 (`List[int]`, *optional*):
348
+ The second tokenized sequence.
349
+ Returns:
350
+ `List[int]`: The token type ids.
351
+ """
352
+ # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
353
+ if token_ids_1 is None:
354
+ # [CLS] X [SEP]
355
+ return (len(token_ids_0) + 2) * [0]
356
+
357
+ # [CLS] A [SEP] [SEP] B [SEP]
358
+ return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
359
+
360
+ def is_ch_char(self, char):
361
+ """
362
+ Whether `char` is a Chinese (CJK Unified Ideographs) character.
363
+ """
364
+ if "\u4e00" <= char <= "\u9fff":
365
+ return True
366
+ return False
367
+
368
+ def is_alpha(self, char):
369
+ """
370
+ Whether `char` is an ASCII alphabetic character.
371
+ """
372
+ if ("a" <= char <= "z") or ("A" <= char <= "Z"):
373
+ return True
374
+ return False
375
+
376
+ def is_punct(self, char):
377
+ """
378
+ Whether `char` is one of the recognized punctuation characters.
379
+ """
380
+ if char in ",;:.?!~,;:。?!《》【】":
381
+ return True
382
+ return False
383
+
384
+ def is_whitespace(self, char):
385
+ """
386
+ Whether `char` is a whitespace character.
387
+ """
388
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
389
+ return True
390
+ if len(char) == 1:
391
+ cat = unicodedata.category(char)
392
+ if cat == "Zs":
393
+ return True
394
+ return False
395
+
396
+ def load_vocab(self, filepath):
397
+ token_to_idx = {}
398
+ with io.open(filepath, "r", encoding="utf-8") as f:
399
+ for index, line in enumerate(f):
400
+ token = line.rstrip("\n")
401
+ token_to_idx[token] = int(index)
402
+
403
+ return token_to_idx
404
+
405
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
406
+ index = 0
407
+ if os.path.isdir(save_directory):
408
+ vocab_file = os.path.join(
409
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
410
+ )
411
+ else:
412
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
413
+ with open(vocab_file, "w", encoding="utf-8") as writer:
414
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
415
+ if index != token_index:
416
+ logger.warning(
417
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
418
+ " Please check that the vocabulary is not corrupted!"
419
+ )
420
+ index = token_index
421
+ writer.write(token + "\n")
422
+ index += 1
423
+
424
+ tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
425
+ with open(tokenizer_model_file, "wb") as fi:
426
+ content_spiece_model = self.sp_model.serialized_model_proto()
427
+ fi.write(content_spiece_model)
428
+
429
+ return (vocab_file,)
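`build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` above encode a sequence pair as `[CLS] A [SEP] [SEP] B [SEP]`. A small standalone sketch with made-up special-token ids (the real ids come from the vocabulary) showing how the pieces line up:

# Hypothetical special-token ids for illustration only.
CLS, SEP = 0, 2

def build_inputs_with_special_tokens(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return [CLS] + token_ids_0 + [SEP]                         # [CLS] X [SEP]
    return [CLS] + token_ids_0 + [SEP, SEP] + token_ids_1 + [SEP]  # [CLS] A [SEP] [SEP] B [SEP]

def create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return [0] * (len(token_ids_0) + 2)
    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

a, b = [11, 12, 13], [21, 22]
print(build_inputs_with_special_tokens(a, b))          # [0, 11, 12, 13, 2, 2, 21, 22, 2]
print(create_token_type_ids_from_sequences(a, b))      # [0, 0, 0, 0, 1, 1, 1, 1, 1]

Note that the model itself only consumes `input_ids` (`model_input_names = ["input_ids"]`), so the token type ids are produced for API compatibility rather than fed to a token-type embedding.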
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__init__.py ADDED
@@ -0,0 +1,68 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_torch_available,
21
+ )
22
+
23
+
24
+ _import_structure = {
25
+ "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
26
+ }
27
+
28
+ try:
29
+ if not is_torch_available():
30
+ raise OptionalDependencyNotAvailable()
31
+ except OptionalDependencyNotAvailable:
32
+ pass
33
+ else:
34
+ _import_structure["modeling_falcon"] = [
35
+ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
36
+ "FalconForCausalLM",
37
+ "FalconModel",
38
+ "FalconPreTrainedModel",
39
+ "FalconForSequenceClassification",
40
+ "FalconForTokenClassification",
41
+ "FalconForQuestionAnswering",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_falcon import (
55
+ FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ FalconForCausalLM,
57
+ FalconForQuestionAnswering,
58
+ FalconForSequenceClassification,
59
+ FalconForTokenClassification,
60
+ FalconModel,
61
+ FalconPreTrainedModel,
62
+ )
63
+
64
+
65
+ else:
66
+ import sys
67
+
68
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
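
The `_LazyModule` registration above means the Falcon submodule only resolves heavy imports on attribute access: the configuration is always importable, while the PyTorch modeling classes are usable only when `is_torch_available()` is true. A rough usage sketch, assuming a standard `transformers` install where these names are re-exported at the top level:

    from transformers import FalconConfig

    config = FalconConfig(num_hidden_layers=2)  # always available, no torch needed

    try:
        from transformers import FalconForCausalLM  # resolved lazily; requires torch
        model = FalconForCausalLM(config)
    except ImportError:
        model = None  # torch missing: only the configuration side of the module is usable
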
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/configuration_falcon.cpython-310.pyc ADDED
Binary file (7.94 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/convert_custom_code_checkpoint.cpython-310.pyc ADDED
Binary file (2.06 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/__pycache__/modeling_falcon.cpython-310.pyc ADDED
Binary file (44.6 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/configuration_falcon.py ADDED
@@ -0,0 +1,192 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Falcon configuration"""
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+ FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
23
+ "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
24
+ "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
25
+ }
26
+
27
+
28
+ class FalconConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the
33
+ [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 65024):
41
+ Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
42
+ `input_ids` passed when calling [`FalconModel`].
43
+ hidden_size (`int`, *optional*, defaults to 4544):
44
+ Dimension of the hidden representations.
45
+ num_hidden_layers (`int`, *optional*, defaults to 32):
46
+ Number of hidden layers in the Transformer decoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 71):
48
+ Number of attention heads for each attention layer in the Transformer decoder.
49
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
50
+ The epsilon used by the layer normalization layers.
51
+ initializer_range (`float`, *optional*, defaults to 0.02):
52
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
53
+ use_cache (`bool`, *optional*, defaults to `True`):
54
+ Whether the model should return the last key/values attentions (not used by all models). Only relevant if
55
+ `config.is_decoder=True`.
56
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
57
+ The dropout probability for MLP layers.
58
+ attention_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout probability for attention layers.
60
+ num_kv_heads (`int`, *optional*):
61
+ Number of key-value heads to use per attention layer. If unset, defaults to the same value as
62
+ `num_attention_heads`.
63
+ alibi (`bool`, *optional*, defaults to `False`):
64
+ Whether to use ALiBi positional biases during self-attention.
65
+ new_decoder_architecture (`bool`, *optional*, defaults to `False`):
66
+ Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
67
+ arguments are ignored, as the new decoder always uses parallel attention.
68
+ multi_query (`bool`, *optional*, defaults to `True`):
69
+ Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
70
+ parallel_attn (`bool`, *optional*, defaults to `True`):
71
+ Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
72
+ instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
73
+ bias (`bool`, *optional*, defaults to `False`):
74
+ Whether to use bias on Linear layers.
75
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
76
+ The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained
77
+ Falcon models with RoPE support up to 2048 tokens.
78
+ rope_theta (`float`, *optional*, defaults to 10000.0):
79
+ The base period of the RoPE embeddings.
80
+ rope_scaling (`Dict`, *optional*):
81
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
82
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
83
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
84
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
85
+ these scaling strategies behave:
86
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
87
+ experimental feature, subject to breaking API changes in future versions.
88
+ bos_token_id (`int`, *optional*, defaults to 11):
89
+ The id of the "beginning-of-sequence" token.
90
+ eos_token_id (`int`, *optional*, defaults to 11):
91
+ The id of the "end-of-sequence" token.
92
+
93
+ Example:
94
+
95
+ ```python
96
+ >>> from transformers import FalconModel, FalconConfig
97
+
98
+ >>> # Initializing a small (2-layer) Falcon configuration
99
+ >>> configuration = FalconConfig(num_hidden_layers=2)
100
+
101
+ >>> # Initializing a model from the small configuration
102
+ >>> model = FalconModel(configuration)
103
+
104
+ >>> # Accessing the model configuration
105
+ >>> configuration = model.config
106
+ ```"""
107
+
108
+ model_type = "falcon"
109
+ keys_to_ignore_at_inference = ["past_key_values"]
110
+
111
+ def __init__(
112
+ self,
113
+ vocab_size=65024,
114
+ hidden_size=4544,
115
+ num_hidden_layers=32,
116
+ num_attention_heads=71,
117
+ layer_norm_epsilon=1e-5,
118
+ initializer_range=0.02,
119
+ use_cache=True,
120
+ hidden_dropout=0.0,
121
+ attention_dropout=0.0,
122
+ num_kv_heads=None,
123
+ alibi=False,
124
+ new_decoder_architecture=False,
125
+ multi_query=True,
126
+ parallel_attn=True,
127
+ bias=False,
128
+ max_position_embeddings=2048,
129
+ rope_theta=10000.0,
130
+ rope_scaling=None,
131
+ bos_token_id=11,
132
+ eos_token_id=11,
133
+ **kwargs,
134
+ ):
135
+ self.vocab_size = vocab_size
136
+ # Backward compatibility with n_embed kwarg
137
+ n_embed = kwargs.pop("n_embed", None)
138
+ self.hidden_size = hidden_size if n_embed is None else n_embed
139
+ self.num_hidden_layers = num_hidden_layers
140
+ self.num_attention_heads = num_attention_heads
141
+ self.layer_norm_epsilon = layer_norm_epsilon
142
+ self.initializer_range = initializer_range
143
+ self.use_cache = use_cache
144
+ self.hidden_dropout = hidden_dropout
145
+ self.attention_dropout = attention_dropout
146
+
147
+ self.bos_token_id = bos_token_id
148
+ self.eos_token_id = eos_token_id
149
+ self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
150
+ self.alibi = alibi
151
+ self.new_decoder_architecture = new_decoder_architecture
152
+ self.multi_query = multi_query # Ignored when new_decoder_architecture is True
153
+ self.parallel_attn = parallel_attn
154
+ self.bias = bias
155
+ self.max_position_embeddings = max_position_embeddings
156
+ self.rope_theta = rope_theta
157
+ self.rope_scaling = rope_scaling
158
+ self._rope_scaling_validation()
159
+
160
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
161
+
162
+ @property
163
+ def head_dim(self):
164
+ return self.hidden_size // self.num_attention_heads
165
+
166
+ @property
167
+ def rotary(self):
168
+ return not self.alibi
169
+
170
+ def _rope_scaling_validation(self):
171
+ """
172
+ Validate the `rope_scaling` configuration.
173
+ """
174
+ if self.rope_scaling is None:
175
+ return
176
+
177
+ if self.alibi:
178
+ raise ValueError("`rope_scaling` is not supported when `alibi` is `True`.")
179
+
180
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
181
+ raise ValueError(
182
+ "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
183
+ f"got {self.rope_scaling}"
184
+ )
185
+ rope_scaling_type = self.rope_scaling.get("type", None)
186
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
187
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
188
+ raise ValueError(
189
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
190
+ )
191
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
192
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/convert_custom_code_checkpoint.py ADDED
@@ -0,0 +1,74 @@
1
+ import json
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+
5
+
6
+ """
7
+ This script converts Falcon custom code checkpoints to modern Falcon checkpoints that use code in the Transformers
8
+ library. After conversion, performance (especially for generation) should improve and the checkpoint can be loaded
9
+ without needing trust_remote_code=True.
10
+ """
11
+
12
+ if __name__ == "__main__":
13
+ parser = ArgumentParser()
14
+ parser.add_argument(
15
+ "--checkpoint_dir",
16
+ type=Path,
17
+ required=True,
18
+ help="Directory containing a custom code checkpoint to convert to a modern Falcon checkpoint.",
19
+ )
20
+ args = parser.parse_args()
21
+
22
+ if not args.checkpoint_dir.is_dir():
23
+ raise ValueError("--checkpoint_dir argument should be a directory!")
24
+
25
+ if (
26
+ not (args.checkpoint_dir / "configuration_RW.py").is_file()
27
+ or not (args.checkpoint_dir / "modelling_RW.py").is_file()
28
+ ):
29
+ raise ValueError(
30
+ "The model directory should contain configuration_RW.py and modelling_RW.py files! Are you sure this is a custom code checkpoint?"
31
+ )
32
+ (args.checkpoint_dir / "configuration_RW.py").unlink()
33
+ (args.checkpoint_dir / "modelling_RW.py").unlink()
34
+
35
+ config = args.checkpoint_dir / "config.json"
36
+ text = config.read_text()
37
+ text = text.replace("RWForCausalLM", "FalconForCausalLM")
38
+ text = text.replace("RefinedWebModel", "falcon")
39
+ text = text.replace("RefinedWeb", "falcon")
40
+ json_config = json.loads(text)
41
+ del json_config["auto_map"]
42
+
43
+ if "n_head" in json_config:
44
+ json_config["num_attention_heads"] = json_config.pop("n_head")
45
+ if "n_layer" in json_config:
46
+ json_config["num_hidden_layers"] = json_config.pop("n_layer")
47
+ if "n_head_kv" in json_config:
48
+ json_config["num_kv_heads"] = json_config.pop("n_head_kv")
49
+ json_config["new_decoder_architecture"] = True
50
+ else:
51
+ json_config["new_decoder_architecture"] = False
52
+ bos_token_id = json_config.get("bos_token_id", 1)
53
+ eos_token_id = json_config.get("eos_token_id", 2)
54
+ config.unlink()
55
+ config.write_text(json.dumps(json_config, indent=2, sort_keys=True))
56
+
57
+ tokenizer_config = args.checkpoint_dir / "tokenizer_config.json"
58
+ if tokenizer_config.is_file():
59
+ text = tokenizer_config.read_text()
60
+ json_config = json.loads(text)
61
+ if json_config["tokenizer_class"] == "PreTrainedTokenizerFast":
62
+ json_config["model_input_names"] = ["input_ids", "attention_mask"]
63
+ tokenizer_config.unlink()
64
+ tokenizer_config.write_text(json.dumps(json_config, indent=2, sort_keys=True))
65
+
66
+ generation_config_path = args.checkpoint_dir / "generation_config.json"
67
+ generation_dict = {
68
+ "_from_model_config": True,
69
+ "bos_token_id": bos_token_id,
70
+ "eos_token_id": eos_token_id,
71
+ "transformers_version": "4.33.0.dev0",
72
+ }
73
+ generation_config_path.write_text(json.dumps(generation_dict, indent=2, sort_keys=True))
74
+ print("Done! Please double-check that the new checkpoint works as expected.")
env-llmeval/lib/python3.10/site-packages/transformers/models/falcon/modeling_falcon.py ADDED
@@ -0,0 +1,1648 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Falcon model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import TYPE_CHECKING, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
25
+ from torch.nn import functional as F
26
+
27
+ from ...modeling_attn_mask_utils import (
28
+ AttentionMaskConverter,
29
+ _prepare_4d_causal_attention_mask,
30
+ _prepare_4d_causal_attention_mask_for_sdpa,
31
+ )
32
+ from ...modeling_outputs import (
33
+ BaseModelOutputWithPastAndCrossAttentions,
34
+ CausalLMOutputWithCrossAttentions,
35
+ QuestionAnsweringModelOutput,
36
+ SequenceClassifierOutputWithPast,
37
+ TokenClassifierOutput,
38
+ )
39
+ from ...modeling_utils import PreTrainedModel
40
+ from ...pytorch_utils import is_torch_greater_or_equal_than_2_0
41
+ from ...utils import (
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ is_flash_attn_2_available,
46
+ is_flash_attn_greater_or_equal_2_10,
47
+ logging,
48
+ )
49
+ from .configuration_falcon import FalconConfig
50
+
51
+
52
+ if TYPE_CHECKING:
53
+ from ...configuration_utils import PretrainedConfig
54
+
55
+ if is_flash_attn_2_available():
56
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
57
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
58
+
59
+ logger = logging.get_logger(__name__)
60
+
61
+ FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = [
62
+ "tiiuae/falcon-40b",
63
+ "tiiuae/falcon-40b-instruct",
64
+ "tiiuae/falcon-7b",
65
+ "tiiuae/falcon-7b-instruct",
66
+ "tiiuae/falcon-rw-7b",
67
+ "tiiuae/falcon-rw-1b",
68
+ ]
69
+ _CHECKPOINT_FOR_DOC = "Rocketknight1/falcon-rw-1b"
70
+ _CONFIG_FOR_DOC = "FalconConfig"
71
+
72
+
73
+ # NOTE(Hesslow): Unfortunately we did not fuse matmul and bias during training, this means that there's one additional quantization to bfloat16 between the operations.
74
+ # In order not to degrade the quality of our HF-port, we keep these characteristics in the final model.
75
+ class FalconLinear(nn.Linear):
76
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
77
+ hidden_states = input @ self.weight.T
78
+ if self.bias is None:
79
+ return hidden_states
80
+ return hidden_states + self.bias
81
+
82
+
83
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
84
+ def rotate_half(x):
85
+ """Rotates half the hidden dims of the input."""
86
+ x1 = x[..., : x.shape[-1] // 2]
87
+ x2 = x[..., x.shape[-1] // 2 :]
88
+ return torch.cat((-x2, x1), dim=-1)
89
+
90
+
91
+ # Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
92
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
93
+ """Applies Rotary Position Embedding to the query and key tensors.
94
+
95
+ Args:
96
+ q (`torch.Tensor`): The query tensor.
97
+ k (`torch.Tensor`): The key tensor.
98
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
99
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
100
+ position_ids (`torch.Tensor`):
101
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
102
+ used to pass offset position ids when working with a KV-cache.
103
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
104
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
105
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
106
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
107
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
108
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
109
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
110
+ Returns:
111
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
112
+ """
113
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
114
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
115
+ q_embed = (q * cos) + (rotate_half(q) * sin)
116
+ k_embed = (k * cos) + (rotate_half(k) * sin)
117
+ return q_embed, k_embed
118
+
119
+
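
A small, self-contained shape sketch of the rotation applied by `apply_rotary_pos_emb` above (illustrative tensor sizes only; `rotate_half` is re-declared locally so the snippet runs on its own):

    import torch

    def rotate_half(x):
        x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
        return torch.cat((-x2, x1), dim=-1)

    batch, heads, seq, head_dim = 2, 4, 5, 8
    q = torch.randn(batch, heads, seq, head_dim)
    k = torch.randn(batch, heads, seq, head_dim)

    # cos/sin caches are [max_seq_len, head_dim]; indexing with position_ids of
    # shape [batch, seq] gives [batch, seq, head_dim], and unsqueeze(1) inserts
    # the head axis so they broadcast against q and k.
    cos = torch.randn(16, head_dim)
    sin = torch.randn(16, head_dim)
    position_ids = torch.arange(seq).unsqueeze(0).expand(batch, seq)

    cos_b = cos[position_ids].unsqueeze(1)
    sin_b = sin[position_ids].unsqueeze(1)
    q_embed = (q * cos_b) + (rotate_half(q) * sin_b)
    k_embed = (k * cos_b) + (rotate_half(k) * sin_b)
    assert q_embed.shape == q.shape and k_embed.shape == k.shape
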
120
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
121
+ def _get_unpad_data(attention_mask):
122
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
123
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
124
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
125
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
126
+ return (
127
+ indices,
128
+ cu_seqlens,
129
+ max_seqlen_in_batch,
130
+ )
131
+
132
+
133
+ # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Falcon
134
+ class FalconRotaryEmbedding(nn.Module):
135
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
136
+ super().__init__()
137
+
138
+ self.dim = dim
139
+ self.max_position_embeddings = max_position_embeddings
140
+ self.base = base
141
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
142
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
143
+
144
+ # Build here to make `torch.jit.trace` work.
145
+ self._set_cos_sin_cache(
146
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
147
+ )
148
+
149
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
150
+ self.max_seq_len_cached = seq_len
151
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
152
+
153
+ freqs = torch.outer(t, self.inv_freq)
154
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
155
+ emb = torch.cat((freqs, freqs), dim=-1)
156
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
157
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
158
+
159
+ def forward(self, x, seq_len=None):
160
+ # x: [bs, num_attention_heads, seq_len, head_size]
161
+ if seq_len > self.max_seq_len_cached:
162
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
163
+
164
+ return (
165
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
166
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
167
+ )
168
+
169
+
170
+ # copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Falcon
171
+ # TODO @joao no longer copied from LLama after static cache, fix me (copied -> Copied)
172
+ class FalconLinearScalingRotaryEmbedding(FalconRotaryEmbedding):
173
+ """FalconRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
174
+
175
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
176
+ self.scaling_factor = scaling_factor
177
+ super().__init__(dim, max_position_embeddings, base, device)
178
+
179
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
180
+ self.max_seq_len_cached = seq_len
181
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
182
+ t = t / self.scaling_factor
183
+
184
+ freqs = torch.outer(t, self.inv_freq)
185
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
186
+ emb = torch.cat((freqs, freqs), dim=-1)
187
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
188
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
189
+
190
+
191
+ # copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Falcon
192
+ # TODO @joao no longer copied from LLama after static cache, fix me (copied -> Copied)
193
+ class FalconDynamicNTKScalingRotaryEmbedding(FalconRotaryEmbedding):
194
+ """FalconRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
195
+
196
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
197
+ self.scaling_factor = scaling_factor
198
+ super().__init__(dim, max_position_embeddings, base, device)
199
+
200
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
201
+ self.max_seq_len_cached = seq_len
202
+
203
+ if seq_len > self.max_position_embeddings:
204
+ base = self.base * (
205
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
206
+ ) ** (self.dim / (self.dim - 2))
207
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
208
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
209
+
210
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
211
+
212
+ freqs = torch.outer(t, self.inv_freq)
213
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
214
+ emb = torch.cat((freqs, freqs), dim=-1)
215
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
216
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
217
+
218
+
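
For the dynamic NTK variant above, the effective RoPE base grows once `seq_len` exceeds `max_position_embeddings`; a quick arithmetic check of that rescaling (illustrative numbers):

    dim, base, max_pos, scaling_factor = 64, 10000.0, 2048, 1.0
    seq_len = 4096  # longer than max_position_embeddings, so the base is enlarged

    new_base = base * ((scaling_factor * seq_len / max_pos) - (scaling_factor - 1)) ** (dim / (dim - 2))
    print(new_base)  # ~2.05 * base for these values, i.e. roughly 20_450
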
219
+ def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
220
+ batch_size, seq_length = attention_mask.shape
221
+ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
222
+ base = torch.tensor(
223
+ 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
224
+ )
225
+ powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
226
+ slopes = torch.pow(base, powers)
227
+
228
+ if closest_power_of_2 != num_heads:
229
+ extra_base = torch.tensor(
230
+ 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
231
+ )
232
+ num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
233
+ extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
234
+ slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
235
+
236
+ # Note: alibi will be added to the attention bias that will be applied to the query-key product of attention
237
+ # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
238
+ # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
239
+ # => the query_length dimension will then be broadcasted correctly
240
+ # This is more or less identical to T5's relative position bias:
241
+ # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
242
+ arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
243
+ alibi = slopes[..., None].bfloat16() * arange_tensor
244
+ return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
245
+
246
+
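
A quick check of `build_alibi_tensor`'s output shape, assuming the function defined just above is in scope (values are illustrative):

    import torch

    attention_mask = torch.ones(2, 6, dtype=torch.long)  # batch_size=2, seq_length=6
    alibi = build_alibi_tensor(attention_mask, num_heads=8, dtype=torch.float32)
    print(alibi.shape)  # torch.Size([16, 1, 6]) == (batch_size * num_heads, 1, seq_length)
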
247
+ # Copied from transformers.models.bloom.modeling_bloom.dropout_add
248
+ def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
249
+ """
250
+ Dropout add function
251
+
252
+ Args:
253
+ x (`torch.tensor`, *required*):
254
+ input tensor
255
+ residual (`torch.tensor`, *required*):
256
+ residual tensor
257
+ prob (`float`, *required*):
258
+ dropout probability
259
+ training (`bool`, *required*):
260
+ training mode
261
+ """
262
+ out = F.dropout(x, p=prob, training=training)
263
+ out = residual + out
264
+ return out
265
+
266
+
267
+ class FalconAttention(nn.Module):
268
+ def __init__(self, config: FalconConfig):
269
+ super().__init__()
270
+
271
+ self.config = config
272
+ self.hidden_size = config.hidden_size
273
+ self.num_heads = config.num_attention_heads
274
+ self.head_dim = self.hidden_size // self.num_heads
275
+ self.split_size = self.hidden_size
276
+ self.hidden_dropout = config.hidden_dropout
277
+ self.max_position_embeddings = config.max_position_embeddings
278
+ self.rope_theta = config.rope_theta
279
+ self.is_causal = True
280
+ self._use_sdpa = config._attn_implementation == "sdpa"
281
+
282
+ if self.head_dim * self.num_heads != self.hidden_size:
283
+ raise ValueError(
284
+ f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
285
+ f" {self.num_heads})."
286
+ )
287
+
288
+ if config.rotary:
289
+ self._init_rope()
290
+
291
+ # Layer-wise attention scaling
292
+ self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
293
+ self.beta = self.inv_norm_factor
294
+ if config.new_decoder_architecture:
295
+ qkv_out_dim = (config.num_kv_heads * 2 + config.num_attention_heads) * self.head_dim
296
+ elif config.multi_query:
297
+ qkv_out_dim = self.hidden_size + 2 * self.head_dim
298
+ else:
299
+ qkv_out_dim = 3 * self.hidden_size
300
+ self.query_key_value = FalconLinear(self.hidden_size, qkv_out_dim, bias=config.bias)
301
+ self.new_decoder_architecture = config.new_decoder_architecture
302
+ self.multi_query = config.multi_query
303
+ self.dense = FalconLinear(self.hidden_size, self.hidden_size, bias=config.bias)
304
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
305
+ self.num_kv_heads = config.num_kv_heads if (self.new_decoder_architecture or not self.multi_query) else 1
306
+
307
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->Falcon
308
+ def _init_rope(self):
309
+ if self.config.rope_scaling is None:
310
+ self.rotary_emb = FalconRotaryEmbedding(
311
+ self.head_dim,
312
+ max_position_embeddings=self.max_position_embeddings,
313
+ base=self.rope_theta,
314
+ )
315
+ else:
316
+ scaling_type = self.config.rope_scaling["type"]
317
+ scaling_factor = self.config.rope_scaling["factor"]
318
+ if scaling_type == "linear":
319
+ self.rotary_emb = FalconLinearScalingRotaryEmbedding(
320
+ self.head_dim,
321
+ max_position_embeddings=self.max_position_embeddings,
322
+ scaling_factor=scaling_factor,
323
+ base=self.rope_theta,
324
+ )
325
+ elif scaling_type == "dynamic":
326
+ self.rotary_emb = FalconDynamicNTKScalingRotaryEmbedding(
327
+ self.head_dim,
328
+ max_position_embeddings=self.max_position_embeddings,
329
+ scaling_factor=scaling_factor,
330
+ base=self.rope_theta,
331
+ )
332
+ else:
333
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
334
+
335
+ def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
336
+ """
337
+ Split the last dimension into (num_heads, head_dim), results share same memory storage as `fused_qkv`
338
+
339
+ Args:
340
+ fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]
341
+
342
+ Returns:
343
+ query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
344
+ value: [batch_size, seq_length, num_heads, head_dim]
345
+ """
346
+ if self.new_decoder_architecture:
347
+ batch, seq_len, _ = fused_qkv.shape
348
+ qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads // self.num_kv_heads + 2, self.head_dim)
349
+ query = qkv[:, :, :, :-2]
350
+ key = qkv[:, :, :, [-2]]
351
+ value = qkv[:, :, :, [-1]]
352
+ key = torch.broadcast_to(key, query.shape)
353
+ value = torch.broadcast_to(value, query.shape)
354
+
355
+ query, key, value = [x.flatten(2, 3) for x in (query, key, value)]
356
+ return query, key, value
357
+ elif not self.multi_query:
358
+ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
359
+ fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
360
+ return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
361
+ else:
362
+ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
363
+ fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim)
364
+ return fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :]
365
+
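
Shape walk-through for the multi-query branch of `_split_heads` above, written as standalone tensor arithmetic with illustrative sizes (71 heads and head_dim 64 match the Falcon-7B defaults):

    import torch

    batch, seq, num_heads, head_dim = 2, 5, 71, 64
    hidden_size = num_heads * head_dim                                # 4544
    fused_qkv = torch.randn(batch, seq, hidden_size + 2 * head_dim)   # multi_query qkv_out_dim

    fused_qkv = fused_qkv.view(batch, seq, num_heads + 2, head_dim)
    query = fused_qkv[..., :-2, :]   # [batch, seq, num_heads, head_dim]
    key = fused_qkv[..., [-2], :]    # [batch, seq, 1, head_dim] -- single head shared by all queries
    value = fused_qkv[..., [-1], :]  # [batch, seq, 1, head_dim]
    print(query.shape, key.shape, value.shape)
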
366
+ # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._merge_heads
367
+ def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
368
+ """
369
+ Merge heads together over the last dimension
370
+
371
+ Args:
372
+ x (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim]
373
+
374
+ Returns:
375
+ torch.tensor: [batch_size, seq_length, num_heads * head_dim]
376
+ """
377
+ # What we want to achieve is:
378
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim
379
+ batch_size_and_num_heads, seq_length, _ = x.shape
380
+ batch_size = batch_size_and_num_heads // self.num_heads
381
+
382
+ # First view to decompose the batch size
383
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim
384
+ x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
385
+
386
+ # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
387
+ x = x.permute(0, 2, 1, 3)
388
+
389
+ # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim
390
+ return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
391
+
392
+ def forward(
393
+ self,
394
+ hidden_states: torch.Tensor,
395
+ alibi: Optional[torch.Tensor],
396
+ attention_mask: torch.Tensor,
397
+ position_ids: Optional[torch.LongTensor] = None,
398
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
399
+ head_mask: Optional[torch.Tensor] = None,
400
+ use_cache: bool = False,
401
+ output_attentions: bool = False,
402
+ **kwargs,
403
+ ):
404
+ if "padding_mask" in kwargs:
405
+ warnings.warn(
406
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
407
+ )
408
+
409
+ fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size]
410
+ num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads
411
+ # 3 x [batch_size, seq_length, num_heads, head_dim]
412
+ (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
413
+
414
+ batch_size, query_length, _, _ = query_layer.shape
415
+
416
+ query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim)
417
+ key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
418
+ value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
419
+
420
+ kv_seq_len = key_layer.shape[-2]
421
+ if layer_past is not None:
422
+ kv_seq_len += layer_past[0].shape[-2]
423
+ if alibi is None:
424
+ cos, sin = self.rotary_emb(value_layer, seq_len=kv_seq_len)
425
+ query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin, position_ids)
426
+
427
+ if layer_past is not None:
428
+ past_key, past_value = layer_past
429
+ # concatenate along seq_length dimension:
430
+ # - key: [batch_size, self.num_heads, kv_length, head_dim]
431
+ # - value: [batch_size, self.num_heads, kv_length, head_dim]
432
+ key_layer = torch.cat((past_key, key_layer), dim=-2)
433
+ value_layer = torch.cat((past_value, value_layer), dim=-2)
434
+
435
+ kv_length = key_layer.shape[-2]
436
+ if use_cache:
437
+ present = (key_layer, value_layer)
438
+ else:
439
+ present = None
440
+
441
+ if self._use_sdpa and query_layer.device.type == "cuda" and attention_mask is not None:
442
+ # For torch<=2.1.2, SDPA with the memory-efficient backend is bugged with non-contiguous inputs when a custom attn_mask is passed.
443
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
444
+ query_layer = query_layer.contiguous()
445
+ key_layer = key_layer.contiguous()
446
+ value_layer = value_layer.contiguous()
447
+
448
+ if alibi is None:
449
+ if self._use_sdpa and not output_attentions:
450
+ attn_output = F.scaled_dot_product_attention(
451
+ query_layer,
452
+ key_layer,
453
+ value_layer,
454
+ attention_mask,
455
+ 0.0,
456
+ # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case query_length == 1.
457
+ is_causal=self.is_causal and attention_mask is None and query_length > 1,
458
+ )
459
+
460
+ attention_scores = None
461
+ else:
462
+ attention_scores = query_layer @ key_layer.transpose(-1, -2)
463
+ attention_scores /= math.sqrt(self.head_dim)
464
+
465
+ attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype)
466
+ # It is unclear why neither dropout nor head_mask is applied here (while it is with alibi).
467
+ attn_output = attention_scores @ value_layer
468
+
469
+ attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim)
470
+ attn_output = attn_output.permute(0, 2, 1, 3)
471
+ attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
472
+
473
+ attn_output = self.dense(attn_output)
474
+
475
+ if output_attentions:
476
+ return attn_output, present, attention_scores
477
+ else:
478
+ return attn_output, present
479
+
480
+ else:
481
+ if self._use_sdpa and not output_attentions and head_mask is None:
482
+ attn_output = F.scaled_dot_product_attention(
483
+ query_layer,
484
+ key_layer,
485
+ value_layer,
486
+ attn_mask=attention_mask,
487
+ dropout_p=self.attention_dropout.p if self.training else 0.0,
488
+ is_causal=self.is_causal and attention_mask is None and query_length > 1,
489
+ )
490
+ attn_output = attn_output.transpose(1, 2)
491
+ attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
492
+
493
+ attn_output = self.dense(attn_output)
494
+ else:
495
+ matmul_result = query_layer @ key_layer.transpose(-1, -2)
496
+
497
+ # change view to [batch_size, num_heads, q_length, kv_length]
498
+ attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length)
499
+
500
+ # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length]
501
+ input_dtype = attention_scores.dtype
502
+ # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38`
503
+ if input_dtype == torch.float16 or input_dtype == torch.bfloat16:
504
+ attention_scores = attention_scores.to(torch.float32)
505
+
506
+ attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1)
507
+ attention_logits *= self.inv_norm_factor
508
+ attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype)
509
+ # [batch_size, num_heads, q_length, kv_length]
510
+ attention_probs = self.attention_dropout(attention_probs)
511
+
512
+ if head_mask is not None:
513
+ attention_probs = attention_probs * head_mask
514
+
515
+ # change view [batch_size, num_heads, q_length, kv_length]
516
+ attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length)
517
+
518
+ # matmul: [batch_size * num_heads, q_length, head_dim]
519
+ attn_output = (attention_probs_reshaped @ value_layer).flatten(0, 1)
520
+
521
+ # change view [batch_size, q_length, num_heads * head_dim]
522
+ attn_output = self._merge_heads(attn_output)
523
+
524
+ attn_output = self.dense(attn_output)
525
+
526
+ if output_attentions:
527
+ return attn_output, present, attention_probs
528
+ else:
529
+ return attn_output, present
530
+
531
+
532
+ class FalconFlashAttention2(FalconAttention):
533
+ """
534
+ Falcon flash attention module. This module inherits from `FalconAttention`, as the weights of the module stay
535
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
536
+ flash attention and deal with padding tokens in case the input contains any of them.
537
+ """
538
+
539
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
540
+ def __init__(self, *args, **kwargs):
541
+ super().__init__(*args, **kwargs)
542
+
543
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
544
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
545
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
546
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
547
+
548
+ def forward(
549
+ self,
550
+ hidden_states: torch.Tensor,
551
+ alibi: Optional[torch.Tensor],
552
+ attention_mask: torch.Tensor,
553
+ position_ids: Optional[torch.LongTensor] = None,
554
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
555
+ head_mask: Optional[torch.Tensor] = None,
556
+ use_cache: bool = False,
557
+ output_attentions: bool = False,
558
+ **kwargs,
559
+ ):
560
+ if "padding_mask" in kwargs:
561
+ warnings.warn(
562
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
563
+ )
564
+
565
+ # overwrite attention_mask with padding_mask
566
+ attention_mask = kwargs.pop("padding_mask")
567
+
568
+ fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size]
569
+ num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads
570
+ # 3 x [batch_size, seq_length, num_heads, head_dim]
571
+ (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
572
+
573
+ batch_size, query_length, _, _ = query_layer.shape
574
+
575
+ query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim)
576
+ key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
577
+ value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim)
578
+
579
+ kv_seq_len = key_layer.shape[-2]
580
+ if layer_past is not None:
581
+ kv_seq_len += layer_past[0].shape[-2]
582
+ if alibi is None:
583
+ cos, sin = self.rotary_emb(value_layer, seq_len=kv_seq_len)
584
+ query_layer, key_layer = apply_rotary_pos_emb(query_layer, key_layer, cos, sin, position_ids)
585
+
586
+ if layer_past is not None and use_cache:
587
+ past_key, past_value = layer_past
588
+ # concatenate along seq_length dimension:
589
+ # - key: [batch_size, self.num_heads, kv_length, head_dim]
590
+ # - value: [batch_size, self.num_heads, kv_length, head_dim]
591
+ key_layer = torch.cat((past_key, key_layer), dim=-2)
592
+ value_layer = torch.cat((past_value, value_layer), dim=-2)
593
+
594
+ past_key_value = (key_layer, value_layer) if use_cache else None
595
+
596
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
597
+ # to be able to avoid many of these transpose/reshape/view.
598
+ query_layer = query_layer.transpose(1, 2)
599
+ key_layer = key_layer.transpose(1, 2)
600
+ value_layer = value_layer.transpose(1, 2)
601
+
602
+ if alibi is not None:
603
+ raise ValueError("`alibi` is not supported when `use_flash_attn` is True")
604
+
605
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
606
+
607
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
608
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
609
+ # cast them back to float16 just to be sure everything works as expected.
610
+ input_dtype = query_layer.dtype
611
+ if input_dtype == torch.float32:
612
+ if torch.is_autocast_enabled():
613
+ target_dtype = torch.get_autocast_gpu_dtype()
614
+ # Handle the case where the model is quantized
615
+ elif hasattr(self.config, "_pre_quantization_dtype"):
616
+ target_dtype = self.config._pre_quantization_dtype
617
+ else:
618
+ target_dtype = self.query_key_value.weight.dtype
619
+
620
+ logger.warning_once(
621
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
622
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
623
+ f" {target_dtype}."
624
+ )
625
+
626
+ query_layer = query_layer.to(target_dtype)
627
+ key_layer = key_layer.to(target_dtype)
628
+ value_layer = value_layer.to(target_dtype)
629
+
630
+ attn_output = self._flash_attention_forward(
631
+ query_layer, key_layer, value_layer, attention_mask, query_length, dropout=attn_dropout
632
+ )
633
+
634
+ attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
635
+ attn_output = self.dense(attn_weights)
636
+
637
+ if not output_attentions:
638
+ attn_weights = None
639
+
640
+ return attn_output, past_key_value, attn_weights
641
+
642
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
643
+ def _flash_attention_forward(
644
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
645
+ ):
646
+ """
647
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
648
+ first unpads the input, then computes the attention scores and pads the final attention scores.
649
+
650
+ Args:
651
+ query_states (`torch.Tensor`):
652
+ Input query states to be passed to Flash Attention API
653
+ key_states (`torch.Tensor`):
654
+ Input key states to be passed to Flash Attention API
655
+ value_states (`torch.Tensor`):
656
+ Input value states to be passed to Flash Attention API
657
+ attention_mask (`torch.Tensor`):
658
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
659
+ position of padding tokens and 1 for the position of non-padding tokens.
660
+ dropout (`float`):
661
+ Attention dropout
662
+ softmax_scale (`float`, *optional*):
663
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
664
+ """
665
+ if not self._flash_attn_uses_top_left_mask:
666
+ causal = self.is_causal
667
+ else:
668
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
669
+ causal = self.is_causal and query_length != 1
670
+
671
+ # Contains at least one padding token in the sequence
672
+ if attention_mask is not None:
673
+ batch_size = query_states.shape[0]
674
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
675
+ query_states, key_states, value_states, attention_mask, query_length
676
+ )
677
+
678
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
679
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
680
+
681
+ attn_output_unpad = flash_attn_varlen_func(
682
+ query_states,
683
+ key_states,
684
+ value_states,
685
+ cu_seqlens_q=cu_seqlens_q,
686
+ cu_seqlens_k=cu_seqlens_k,
687
+ max_seqlen_q=max_seqlen_in_batch_q,
688
+ max_seqlen_k=max_seqlen_in_batch_k,
689
+ dropout_p=dropout,
690
+ softmax_scale=softmax_scale,
691
+ causal=causal,
692
+ )
693
+
694
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
695
+ else:
696
+ attn_output = flash_attn_func(
697
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
698
+ )
699
+
700
+ return attn_output
701
+
702
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
703
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
704
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
705
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
706
+
707
+ key_layer = index_first_axis(
708
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
709
+ )
710
+ value_layer = index_first_axis(
711
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
712
+ )
713
+ if query_length == kv_seq_len:
714
+ query_layer = index_first_axis(
715
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
716
+ )
717
+ cu_seqlens_q = cu_seqlens_k
718
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
719
+ indices_q = indices_k
720
+ elif query_length == 1:
721
+ max_seqlen_in_batch_q = 1
722
+ cu_seqlens_q = torch.arange(
723
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
724
+ ) # There is a memcpy here, that is very bad.
725
+ indices_q = cu_seqlens_q[:-1]
726
+ query_layer = query_layer.squeeze(1)
727
+ else:
728
+ # The -q_len: slice assumes left padding.
729
+ attention_mask = attention_mask[:, -query_length:]
730
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
731
+
732
+ return (
733
+ query_layer,
734
+ key_layer,
735
+ value_layer,
736
+ indices_q,
737
+ (cu_seqlens_q, cu_seqlens_k),
738
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
739
+ )
740
+
741
+
742
+ class FalconMLP(nn.Module):
743
+ def __init__(self, config: FalconConfig):
744
+ super().__init__()
745
+ hidden_size = config.hidden_size
746
+
747
+ self.dense_h_to_4h = FalconLinear(hidden_size, 4 * hidden_size, bias=config.bias)
748
+ self.act = nn.GELU()
749
+ self.dense_4h_to_h = FalconLinear(4 * hidden_size, hidden_size, bias=config.bias)
750
+ self.hidden_dropout = config.hidden_dropout
751
+
752
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
753
+ x = self.act(self.dense_h_to_4h(x))
754
+ x = self.dense_4h_to_h(x)
755
+ return x
756
+
757
+
758
+ FALCON_ATTENTION_CLASSES = {
759
+ "eager": FalconAttention,
760
+ "sdpa": FalconAttention, # FalconAttention originally implemented both a forward with & without SDPA
761
+ "flash_attention_2": FalconFlashAttention2,
762
+ }
763
+
764
+
765
+ class FalconDecoderLayer(nn.Module):
766
+ def __init__(self, config: FalconConfig):
767
+ super().__init__()
768
+ hidden_size = config.hidden_size
769
+ self.num_heads = config.num_attention_heads
770
+
771
+ self.self_attention = FALCON_ATTENTION_CLASSES[config._attn_implementation](config)
772
+ self.mlp = FalconMLP(config)
773
+ self.hidden_dropout = config.hidden_dropout
774
+ self.config = config
775
+
776
+ if config.new_decoder_architecture:
777
+ # The layer norm before self-attention
778
+ self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
779
+ # The layer norm before the MLP
780
+ self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
781
+ else:
782
+ self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
783
+ if not config.parallel_attn:
784
+ self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
785
+
786
+ def forward(
787
+ self,
788
+ hidden_states: torch.Tensor,
789
+ alibi: Optional[torch.Tensor],
790
+ attention_mask: torch.Tensor,
791
+ position_ids: Optional[torch.LongTensor] = None,
792
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
793
+ head_mask: Optional[torch.Tensor] = None,
794
+ use_cache: bool = False,
795
+ output_attentions: bool = False,
796
+ **kwargs,
797
+ ):
798
+ if "padding_mask" in kwargs:
799
+ warnings.warn(
800
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
801
+ )
802
+
803
+ residual = hidden_states
804
+
805
+ if self.config.new_decoder_architecture:
806
+ attention_layernorm_out = self.ln_attn(hidden_states)
807
+ mlp_layernorm_out = self.ln_mlp(hidden_states)
808
+ else:
809
+ attention_layernorm_out = self.input_layernorm(hidden_states)
810
+
811
+ # Self attention.
812
+ attn_outputs = self.self_attention(
813
+ attention_layernorm_out,
814
+ layer_past=layer_past,
815
+ attention_mask=attention_mask,
816
+ position_ids=position_ids,
817
+ alibi=alibi,
818
+ head_mask=head_mask,
819
+ use_cache=use_cache,
820
+ output_attentions=output_attentions,
821
+ **kwargs,
822
+ )
823
+
824
+ attention_output = attn_outputs[0]
825
+
826
+ if not self.config.new_decoder_architecture:
827
+ if self.config.parallel_attn:
828
+ mlp_layernorm_out = attention_layernorm_out
829
+ else:
830
+ residual = dropout_add(
831
+ attention_output, residual, self.config.attention_dropout, training=self.training
832
+ )
833
+ mlp_layernorm_out = self.post_attention_layernorm(residual)
834
+
835
+ outputs = attn_outputs[1:]
836
+
837
+ # MLP.
838
+ mlp_output = self.mlp(mlp_layernorm_out)
839
+
840
+ if self.config.new_decoder_architecture or self.config.parallel_attn:
841
+ mlp_output += attention_output
842
+
843
+ output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training)
844
+
845
+ if use_cache:
846
+ outputs = (output,) + outputs
847
+ else:
848
+ outputs = (output,) + outputs[1:]
849
+
850
+ return outputs # hidden_states, present, attentions
851
+
852
+
853
+ FALCON_START_DOCSTRING = r"""
854
+
855
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
856
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, etc.)
857
+
858
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
859
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
860
+ and behavior.
861
+
862
+ Parameters:
863
+ config ([`FalconConfig`]): Model configuration class with all the parameters of the model.
864
+ Initializing with a config file does not load the weights associated with the model, only the
865
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
866
+ """
867
+
868
+ FALCON_INPUTS_DOCSTRING = r"""
869
+ Args:
870
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
871
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
872
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
873
+
874
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
875
+ `input_ids`.
876
+
877
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
878
+ [`PreTrainedTokenizer.__call__`] for details.
879
+
880
+ [What are input IDs?](../glossary#input-ids)
881
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.num_hidden_layers`):
882
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
883
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
884
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
885
+
886
+ Each element of `past_key_values` is a tuple (past_key, past_value):
887
+ - past_key: [batch_size * num_heads, head_dim, kv_length]
888
+ - past_value: [batch_size * num_heads, kv_length, head_dim]
889
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
890
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
891
+
892
+ - 1 for tokens that are **not masked**,
893
+ - 0 for tokens that are **masked**.
894
+
895
+ [What are attention masks?](../glossary#attention-mask)
896
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
897
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
898
+ config.n_positions - 1]`.
899
+
900
+ [What are position IDs?](../glossary#position-ids)
901
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
902
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
903
+
904
+ - 1 indicates the head is **not masked**,
905
+ - 0 indicates the head is **masked**.
906
+
907
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
908
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
909
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
910
+ model's internal embedding lookup matrix.
911
+
912
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
913
+ `past_key_values`).
914
+ use_cache (`bool`, *optional*):
915
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
916
+ `past_key_values`).
917
+ output_attentions (`bool`, *optional*):
918
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
919
+ tensors for more detail.
920
+ output_hidden_states (`bool`, *optional*):
921
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
922
+ more detail.
923
+ return_dict (`bool`, *optional*):
924
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
925
+ """
926
+
927
+
928
+ class FalconPreTrainedModel(PreTrainedModel):
929
+ """
930
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
931
+ models.
932
+ """
933
+
934
+ config_class = FalconConfig
935
+ base_model_prefix = "transformer"
936
+ supports_gradient_checkpointing = True
937
+ _no_split_modules = ["FalconDecoderLayer"]
938
+ _supports_flash_attn_2 = True
939
+ _supports_sdpa = True
940
+
941
+ def __init__(self, *inputs, **kwargs):
942
+ super().__init__(*inputs, **kwargs)
943
+
944
+ def _init_weights(self, module: nn.Module):
945
+ """Initialize the weights."""
946
+ if isinstance(module, nn.Linear) or isinstance(module, FalconLinear):
947
+ # Slightly different from the TF version which uses truncated_normal for initialization
948
+ # cf https://github.com/pytorch/pytorch/pull/5617
949
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
950
+ if module.bias is not None:
951
+ module.bias.data.zero_()
952
+ elif isinstance(module, nn.Embedding):
953
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
954
+ if module.padding_idx is not None:
955
+ module.weight.data[module.padding_idx].zero_()
956
+ elif isinstance(module, LayerNorm):
957
+ module.bias.data.zero_()
958
+ module.weight.data.fill_(1.0)
959
+
960
+ # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
961
+ @classmethod
962
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> "PretrainedConfig":
963
+ # NOTE: Falcon supported SDPA from PyTorch 2.0. We keep it like that for backward compatibility (automatically use SDPA for torch>=2.0).
964
+ if hard_check_only:
965
+ if not is_torch_greater_or_equal_than_2_0:
966
+ raise ImportError("PyTorch SDPA requirements in Transformers are not met. Please install torch>=2.0.")
967
+
968
+ if not is_torch_greater_or_equal_than_2_0:
969
+ return config
970
+
971
+ _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
972
+ if _is_bettertransformer:
973
+ return config
974
+
975
+ if not hard_check_only:
976
+ config._attn_implementation = "sdpa"
977
+ return config
978
+
979
+
980
+ @add_start_docstrings(
981
+ "The bare Falcon Model transformer outputting raw hidden-states without any specific head on top.",
982
+ FALCON_START_DOCSTRING,
983
+ )
984
+ class FalconModel(FalconPreTrainedModel):
985
+ def __init__(self, config: FalconConfig):
986
+ super().__init__(config)
987
+
988
+ self.embed_dim = config.hidden_size
989
+ self.num_heads = config.num_attention_heads
990
+ self.use_alibi = config.alibi
991
+
992
+ # Embedding + LN Embedding
993
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
994
+
995
+ # Transformer blocks
996
+ self.h = nn.ModuleList([FalconDecoderLayer(config) for _ in range(config.num_hidden_layers)])
997
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
998
+ self._use_sdpa = config._attn_implementation == "sdpa"
999
+
1000
+ # Final Layer Norm
1001
+ self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
1002
+
1003
+ self.gradient_checkpointing = False
1004
+
1005
+ # Initialize weights and apply final processing
1006
+ self.post_init()
1007
+
1008
+ def get_input_embeddings(self):
1009
+ return self.word_embeddings
1010
+
1011
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
1012
+ self.word_embeddings = new_embeddings
1013
+
1014
+ @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING)
1015
+ @add_code_sample_docstrings(
1016
+ checkpoint=_CHECKPOINT_FOR_DOC,
1017
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
1018
+ config_class=_CONFIG_FOR_DOC,
1019
+ )
1020
+ def forward(
1021
+ self,
1022
+ input_ids: Optional[torch.LongTensor] = None,
1023
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1024
+ attention_mask: Optional[torch.Tensor] = None,
1025
+ position_ids: Optional[torch.LongTensor] = None,
1026
+ head_mask: Optional[torch.LongTensor] = None,
1027
+ inputs_embeds: Optional[torch.LongTensor] = None,
1028
+ use_cache: Optional[bool] = None,
1029
+ output_attentions: Optional[bool] = None,
1030
+ output_hidden_states: Optional[bool] = None,
1031
+ return_dict: Optional[bool] = None,
1032
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
1033
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1034
+ output_hidden_states = (
1035
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1036
+ )
1037
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1038
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1039
+
1040
+ if input_ids is not None and inputs_embeds is not None:
1041
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1042
+ elif input_ids is not None:
1043
+ batch_size, seq_length = input_ids.shape
1044
+ elif inputs_embeds is not None:
1045
+ batch_size, seq_length, _ = inputs_embeds.shape
1046
+ else:
1047
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1048
+
1049
+ if past_key_values is None:
1050
+ past_key_values = tuple([None] * len(self.h))
1051
+
1052
+ if inputs_embeds is None:
1053
+ inputs_embeds = self.word_embeddings(input_ids)
1054
+
1055
+ hidden_states = inputs_embeds
1056
+
1057
+ if self.gradient_checkpointing and self.training:
1058
+ if use_cache:
1059
+ logger.warning(
1060
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1061
+ )
1062
+ use_cache = False
1063
+ presents = () if use_cache else None
1064
+ all_self_attentions = () if output_attentions else None
1065
+ all_hidden_states = () if output_hidden_states else None
1066
+
1067
+ # Compute alibi tensor: check build_alibi_tensor documentation
1068
+ past_key_values_length = 0
1069
+ if past_key_values[0] is not None:
1070
+ past_key_values_length = past_key_values[0][0].shape[-2]
1071
+
1072
+ if self.use_alibi:
1073
+ mask = (
1074
+ torch.ones(
1075
+ (batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long
1076
+ )
1077
+ if attention_mask is None
1078
+ else attention_mask
1079
+ )
1080
+ alibi = build_alibi_tensor(mask, self.num_heads, dtype=hidden_states.dtype)
1081
+ else:
1082
+ alibi = None
1083
+ if position_ids is None:
1084
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1085
+ position_ids = torch.arange(
1086
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
1087
+ )
1088
+ position_ids = position_ids.unsqueeze(0)
1089
+
1090
+ if self._use_flash_attention_2:
1091
+ # 2d mask is passed through the layers
1092
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
1093
+ elif self._use_sdpa and not output_attentions:
1094
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1095
+ # the manual implementation that requires a 4D causal mask in all cases.
1096
+ if alibi is None:
1097
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
1098
+ attention_mask,
1099
+ (batch_size, seq_length),
1100
+ inputs_embeds,
1101
+ past_key_values_length,
1102
+ )
1103
+ elif head_mask is None:
1104
+ alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:])
1105
+
1106
+ attention_mask_2d = attention_mask
1107
+ # We don't call _prepare_4d_causal_attention_mask_for_sdpa as we need to mask alibi using the 4D attention_mask untouched.
1108
+ attention_mask = _prepare_4d_causal_attention_mask(
1109
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1110
+ )
1111
+
1112
+ # We take care to integrate alibi bias in the attention_mask here.
1113
+ if attention_mask_2d is None:
1114
+ attention_mask = alibi / math.sqrt(self.config.hidden_size // self.num_heads)
1115
+ else:
1116
+ min_dtype = torch.finfo(alibi.dtype).min
1117
+ attention_mask = torch.masked_fill(
1118
+ alibi / math.sqrt(self.config.hidden_size // self.num_heads),
1119
+ attention_mask < -1,
1120
+ min_dtype,
1121
+ )
1122
+
1123
+ # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend
1124
+ # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213
1125
+ if seq_length > 1 and attention_mask.device.type == "cuda":
1126
+ attention_mask = AttentionMaskConverter._unmask_unattended(attention_mask, min_dtype=min_dtype)
1127
+ else:
1128
+ # PyTorch SDPA does not support head_mask, we fall back on the eager implementation in this case.
1129
+ attention_mask = _prepare_4d_causal_attention_mask(
1130
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1131
+ )
1132
+ else:
1133
+ # 4d mask is passed through the layers
1134
+ attention_mask = _prepare_4d_causal_attention_mask(
1135
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
1136
+ )
1137
+
1138
+ # Prepare head mask if needed
1139
+ # 1.0 in head_mask indicate we keep the head
1140
+ # attention_probs has shape batch_size x num_heads x N x N
1141
+ # head_mask has shape n_layer x batch x num_heads x N x N
1142
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1143
+
1144
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
1145
+ if output_hidden_states:
1146
+ all_hidden_states = all_hidden_states + (hidden_states,)
1147
+
1148
+ if self.gradient_checkpointing and self.training:
1149
+ outputs = self._gradient_checkpointing_func(
1150
+ block.__call__,
1151
+ hidden_states,
1152
+ alibi,
1153
+ attention_mask,
1154
+ position_ids,
1155
+ head_mask[i],
1156
+ layer_past,
1157
+ use_cache,
1158
+ output_attentions,
1159
+ )
1160
+ else:
1161
+ outputs = block(
1162
+ hidden_states,
1163
+ layer_past=layer_past,
1164
+ attention_mask=attention_mask,
1165
+ position_ids=position_ids,
1166
+ head_mask=head_mask[i],
1167
+ use_cache=use_cache,
1168
+ output_attentions=output_attentions,
1169
+ alibi=alibi,
1170
+ )
1171
+
1172
+ hidden_states = outputs[0]
1173
+ if use_cache is True:
1174
+ presents = presents + (outputs[1],)
1175
+
1176
+ if output_attentions:
1177
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
1178
+
1179
+ # Add last hidden state
1180
+ hidden_states = self.ln_f(hidden_states)
1181
+
1182
+ if output_hidden_states:
1183
+ all_hidden_states = all_hidden_states + (hidden_states,)
1184
+
1185
+ if not return_dict:
1186
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
1187
+
1188
+ return BaseModelOutputWithPastAndCrossAttentions(
1189
+ last_hidden_state=hidden_states,
1190
+ past_key_values=presents,
1191
+ hidden_states=all_hidden_states,
1192
+ attentions=all_self_attentions,
1193
+ )
1194
+
1195
+
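When `config.alibi` is set, the `forward` above builds a per-head linear position bias with `build_alibi_tensor` and, on the SDPA path, folds it into the 4D attention mask. As a rough standalone sketch of that bias (the helper name `simple_alibi_bias` is made up here, the slope formula assumes a power-of-two head count, and the library version additionally handles other head counts and dtype casting):

```python
import torch

def simple_alibi_bias(attention_mask: torch.Tensor, num_heads: int) -> torch.Tensor:
    """Simplified ALiBi bias (assumes num_heads is a power of two).

    attention_mask: (batch_size, seq_length) with 1 for real tokens, 0 for padding.
    Returns a bias of shape (batch_size * num_heads, 1, seq_length) that is added to the
    attention scores, penalizing distant key positions linearly with a per-head slope.
    """
    batch_size, seq_length = attention_mask.shape
    # Per-head slopes 2^(-8/n), 2^(-16/n), ... as in the ALiBi paper.
    slopes = torch.tensor([2 ** (-8 * (i + 1) / num_heads) for i in range(num_heads)])
    # Position of each key, counted over non-padding tokens only.
    positions = (attention_mask.cumsum(dim=-1) - 1) * attention_mask       # (batch, seq)
    bias = slopes[None, :, None] * positions[:, None, :]                   # (batch, heads, seq)
    return bias.reshape(batch_size * num_heads, 1, seq_length)
```

The SDPA branch above then rescales this bias by `1 / sqrt(head_dim)` and masks out padded positions before handing it to `scaled_dot_product_attention`.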
1196
+ @add_start_docstrings(
1197
+ "The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).",
1198
+ FALCON_START_DOCSTRING,
1199
+ )
1200
+ class FalconForCausalLM(FalconPreTrainedModel):
1201
+ _tied_weights_keys = ["lm_head.weight"]
1202
+
1203
+ def __init__(self, config: FalconConfig):
1204
+ super().__init__(config)
1205
+ self.transformer = FalconModel(config)
1206
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1207
+
1208
+ # Initialize weights and apply final processing
1209
+ self.post_init()
1210
+
1211
+ def get_output_embeddings(self):
1212
+ return self.lm_head
1213
+
1214
+ def set_output_embeddings(self, new_embeddings: torch.Tensor):
1215
+ self.lm_head = new_embeddings
1216
+
1217
+ def prepare_inputs_for_generation(
1218
+ self,
1219
+ input_ids: torch.LongTensor,
1220
+ past_key_values: Optional[torch.Tensor] = None,
1221
+ attention_mask: Optional[torch.Tensor] = None,
1222
+ position_ids: Optional[torch.Tensor] = None,
1223
+ **kwargs,
1224
+ ) -> dict:
1225
+ if past_key_values is not None:
1226
+ past_length = past_key_values[0][0].shape[2]
1227
+
1228
+ # Some generation methods already pass only the last input ID
1229
+ if input_ids.shape[1] > past_length:
1230
+ remove_prefix_length = past_length
1231
+ else:
1232
+ # Default to old behavior: keep only final ID
1233
+ remove_prefix_length = input_ids.shape[1] - 1
1234
+
1235
+ input_ids = input_ids[:, remove_prefix_length:]
1236
+
1237
+ # Note: versions of Falcon with alibi do not use position_ids. It is used with RoPE.
1238
+ if not self.transformer.use_alibi and attention_mask is not None and position_ids is None:
1239
+ # create position_ids on the fly for batch generation
1240
+ position_ids = attention_mask.long().cumsum(-1) - 1
1241
+ position_ids.masked_fill_(attention_mask == 0, 1)
1242
+ if past_key_values:
1243
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1244
+
1245
+ return {
1246
+ "input_ids": input_ids,
1247
+ "position_ids": position_ids,
1248
+ "past_key_values": past_key_values,
1249
+ "use_cache": kwargs.get("use_cache"),
1250
+ "attention_mask": attention_mask,
1251
+ }
1252
+
1253
+ @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING)
1254
+ @add_code_sample_docstrings(
1255
+ checkpoint=_CHECKPOINT_FOR_DOC,
1256
+ output_type=CausalLMOutputWithCrossAttentions,
1257
+ config_class=_CONFIG_FOR_DOC,
1258
+ )
1259
+ def forward(
1260
+ self,
1261
+ input_ids: Optional[torch.LongTensor] = None,
1262
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1263
+ attention_mask: Optional[torch.Tensor] = None,
1264
+ position_ids: Optional[torch.LongTensor] = None,
1265
+ head_mask: Optional[torch.Tensor] = None,
1266
+ inputs_embeds: Optional[torch.Tensor] = None,
1267
+ labels: Optional[torch.Tensor] = None,
1268
+ use_cache: Optional[bool] = None,
1269
+ output_attentions: Optional[bool] = None,
1270
+ output_hidden_states: Optional[bool] = None,
1271
+ return_dict: Optional[bool] = None,
1272
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
1273
+ r"""
1274
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1275
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1276
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1277
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1278
+ """
1279
+
1280
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1281
+
1282
+ transformer_outputs = self.transformer(
1283
+ input_ids,
1284
+ past_key_values=past_key_values,
1285
+ attention_mask=attention_mask,
1286
+ position_ids=position_ids,
1287
+ head_mask=head_mask,
1288
+ inputs_embeds=inputs_embeds,
1289
+ use_cache=use_cache,
1290
+ output_attentions=output_attentions,
1291
+ output_hidden_states=output_hidden_states,
1292
+ return_dict=return_dict,
1293
+ )
1294
+ hidden_states = transformer_outputs[0]
1295
+
1296
+ lm_logits = self.lm_head(hidden_states)
1297
+
1298
+ loss = None
1299
+ if labels is not None:
1300
+ # Shift so that tokens < n predict n
1301
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1302
+ shift_labels = labels[..., 1:].contiguous()
1303
+ batch_size, seq_length, vocab_size = shift_logits.shape
1304
+ # Flatten the tokens
1305
+ loss_fct = CrossEntropyLoss()
1306
+ loss = loss_fct(
1307
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
1308
+ )
1309
+
1310
+ if not return_dict:
1311
+ output = (lm_logits,) + transformer_outputs[1:]
1312
+ return ((loss,) + output) if loss is not None else output
1313
+
1314
+ return CausalLMOutputWithCrossAttentions(
1315
+ loss=loss,
1316
+ logits=lm_logits,
1317
+ past_key_values=transformer_outputs.past_key_values,
1318
+ hidden_states=transformer_outputs.hidden_states,
1319
+ attentions=transformer_outputs.attentions,
1320
+ )
1321
+
1322
+ def _reorder_cache(
1323
+ self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1324
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1325
+ """
1326
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1327
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1328
+ beam_idx at every generation step.
1329
+
1330
+ Output shares the same memory storage as `past`.
1331
+ """
1332
+
1333
+ # Get a copy of `beam_idx` on all the devices where we need those indices.
1334
+ device_to_beam_idx = {
1335
+ past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
1336
+ }
1337
+ reordered_past = tuple(
1338
+ (
1339
+ layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
1340
+ layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
1341
+ )
1342
+ for layer_past in past
1343
+ )
1344
+ return reordered_past
1345
+
1346
+
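As the `labels` docstring in `forward` above notes, targets are shifted inside the model so that position `t` predicts token `t + 1`. A self-contained toy sketch of that shifted cross-entropy (random tensors, illustrative only):

```python
import torch
from torch.nn import CrossEntropyLoss

# Toy shapes: batch of 2 sequences of length 5 over a 10-token vocabulary.
lm_logits = torch.randn(2, 5, 10)
labels = torch.randint(0, 10, (2, 5))

# Drop the last logit and the first label so position t is scored against token t+1.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()

batch_size, seq_length, vocab_size = shift_logits.shape
loss = CrossEntropyLoss()(
    shift_logits.view(batch_size * seq_length, vocab_size),
    shift_labels.view(batch_size * seq_length),
)
```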
1347
+ @add_start_docstrings(
1348
+ """
1349
+ The Falcon Model transformer with a sequence classification head on top (linear layer).
1350
+
1351
+ [`FalconForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1352
+ (e.g. GPT-1) do.
1353
+
1354
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1355
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1356
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1357
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1358
+ each row of the batch).
1359
+ """,
1360
+ FALCON_START_DOCSTRING,
1361
+ )
1362
+ class FalconForSequenceClassification(FalconPreTrainedModel):
1363
+ def __init__(self, config: FalconConfig):
1364
+ super().__init__(config)
1365
+ self.num_labels = config.num_labels
1366
+ self.transformer = FalconModel(config)
1367
+ self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
1368
+
1369
+ # Initialize weights and apply final processing
1370
+ self.post_init()
1371
+
1372
+ @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING)
1373
+ @add_code_sample_docstrings(
1374
+ checkpoint=_CHECKPOINT_FOR_DOC,
1375
+ output_type=SequenceClassifierOutputWithPast,
1376
+ config_class=_CONFIG_FOR_DOC,
1377
+ )
1378
+ def forward(
1379
+ self,
1380
+ input_ids: Optional[torch.LongTensor] = None,
1381
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1382
+ attention_mask: Optional[torch.Tensor] = None,
1383
+ head_mask: Optional[torch.Tensor] = None,
1384
+ inputs_embeds: Optional[torch.Tensor] = None,
1385
+ labels: Optional[torch.Tensor] = None,
1386
+ use_cache: Optional[bool] = None,
1387
+ output_attentions: Optional[bool] = None,
1388
+ output_hidden_states: Optional[bool] = None,
1389
+ return_dict: Optional[bool] = None,
1390
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
1391
+ r"""
1392
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1393
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1394
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1395
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1396
+ """
1397
+
1398
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1399
+
1400
+ transformer_outputs = self.transformer(
1401
+ input_ids,
1402
+ past_key_values=past_key_values,
1403
+ attention_mask=attention_mask,
1404
+ head_mask=head_mask,
1405
+ inputs_embeds=inputs_embeds,
1406
+ use_cache=use_cache,
1407
+ output_attentions=output_attentions,
1408
+ output_hidden_states=output_hidden_states,
1409
+ return_dict=return_dict,
1410
+ )
1411
+
1412
+ hidden_states = transformer_outputs[0]
1413
+ logits = self.score(hidden_states)
1414
+
1415
+ if input_ids is not None:
1416
+ batch_size = input_ids.shape[0]
1417
+ else:
1418
+ batch_size = inputs_embeds.shape[0]
1419
+
1420
+ if self.config.pad_token_id is None and batch_size != 1:
1421
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1422
+ if self.config.pad_token_id is None:
1423
+ sequence_lengths = -1
1424
+ else:
1425
+ if input_ids is not None:
1426
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1427
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1428
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1429
+ sequence_lengths = sequence_lengths.to(logits.device)
1430
+ else:
1431
+ sequence_lengths = -1
1432
+ logger.warning(
1433
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1434
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1435
+ )
1436
+
1437
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1438
+
1439
+ loss = None
1440
+ if labels is not None:
1441
+ if self.config.problem_type is None:
1442
+ if self.num_labels == 1:
1443
+ self.config.problem_type = "regression"
1444
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1445
+ self.config.problem_type = "single_label_classification"
1446
+ else:
1447
+ self.config.problem_type = "multi_label_classification"
1448
+
1449
+ if self.config.problem_type == "regression":
1450
+ loss_fct = MSELoss()
1451
+ if self.num_labels == 1:
1452
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1453
+ else:
1454
+ loss = loss_fct(pooled_logits, labels)
1455
+ elif self.config.problem_type == "single_label_classification":
1456
+ loss_fct = CrossEntropyLoss()
1457
+ loss = loss_fct(pooled_logits, labels)
1458
+ elif self.config.problem_type == "multi_label_classification":
1459
+ loss_fct = BCEWithLogitsLoss()
1460
+ loss = loss_fct(pooled_logits, labels)
1461
+ if not return_dict:
1462
+ output = (pooled_logits,) + transformer_outputs[1:]
1463
+ return ((loss,) + output) if loss is not None else output
1464
+
1465
+ return SequenceClassifierOutputWithPast(
1466
+ loss=loss,
1467
+ logits=pooled_logits,
1468
+ past_key_values=transformer_outputs.past_key_values,
1469
+ hidden_states=transformer_outputs.hidden_states,
1470
+ attentions=transformer_outputs.attentions,
1471
+ )
1472
+
1473
+
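The last-token pooling in `forward` above uses an ONNX-friendly trick: `argmax` over `input_ids == pad_token_id` locates the first padding position, and the modulo wraps the no-padding case (where `argmax` returns 0, hence -1 after the subtraction) back to the final position. A small standalone sketch with toy tensors and a made-up `pad_token_id`:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 6, 8, 2]])   # second row has no padding
logits = torch.randn(2, 5, 3)                 # (batch, seq, num_labels)

# First pad index minus one; argmax returns 0 when no pad exists, so the modulo
# maps -1 back to the last position without any reverse indexing.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]

pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]
print(sequence_lengths)  # tensor([2, 4])
```

This avoids negative indexing, which is why the comment in `forward` mentions ONNX compatibility.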
1474
+ @add_start_docstrings(
1475
+ """
1476
+ Falcon Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1477
+ Named-Entity-Recognition (NER) tasks.
1478
+ """,
1479
+ FALCON_START_DOCSTRING,
1480
+ )
1481
+ class FalconForTokenClassification(FalconPreTrainedModel):
1482
+ def __init__(self, config: FalconConfig):
1483
+ super().__init__(config)
1484
+ self.num_labels = config.num_labels
1485
+
1486
+ self.transformer = FalconModel(config)
1487
+ if getattr(config, "classifier_dropout", None) is not None:
1488
+ classifier_dropout = config.classifier_dropout
1489
+ elif getattr(config, "hidden_dropout", None) is not None:
1490
+ classifier_dropout = config.hidden_dropout
1491
+ else:
1492
+ classifier_dropout = 0.1
1493
+ self.dropout = nn.Dropout(classifier_dropout)
1494
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1495
+
1496
+ # Initialize weights and apply final processing
1497
+ self.post_init()
1498
+
1499
+ @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING)
1500
+ @add_code_sample_docstrings(
1501
+ checkpoint=_CHECKPOINT_FOR_DOC,
1502
+ output_type=TokenClassifierOutput,
1503
+ config_class=_CONFIG_FOR_DOC,
1504
+ )
1505
+ def forward(
1506
+ self,
1507
+ input_ids: Optional[torch.LongTensor] = None,
1508
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1509
+ attention_mask: Optional[torch.Tensor] = None,
1510
+ head_mask: Optional[torch.Tensor] = None,
1511
+ inputs_embeds: Optional[torch.Tensor] = None,
1512
+ labels: Optional[torch.Tensor] = None,
1513
+ use_cache: Optional[bool] = None,
1514
+ output_attentions: Optional[bool] = None,
1515
+ output_hidden_states: Optional[bool] = None,
1516
+ return_dict: Optional[bool] = None,
1517
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1518
+ r"""
1519
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1520
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1521
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1522
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1523
+ """
1524
+
1525
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1526
+
1527
+ transformer_outputs = self.transformer(
1528
+ input_ids,
1529
+ past_key_values=past_key_values,
1530
+ attention_mask=attention_mask,
1531
+ head_mask=head_mask,
1532
+ inputs_embeds=inputs_embeds,
1533
+ use_cache=use_cache,
1534
+ output_attentions=output_attentions,
1535
+ output_hidden_states=output_hidden_states,
1536
+ return_dict=return_dict,
1537
+ )
1538
+
1539
+ hidden_states = transformer_outputs[0]
1540
+ hidden_states = self.dropout(hidden_states)
1541
+ logits = self.classifier(hidden_states)
1542
+
1543
+ loss = None
1544
+ if labels is not None:
1545
+ batch_size, seq_length = labels.shape
1546
+ loss_fct = CrossEntropyLoss()
1547
+ loss = loss_fct(
1548
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1549
+ )
1550
+
1551
+ if not return_dict:
1552
+ output = (logits,) + transformer_outputs[2:]
1553
+ return ((loss,) + output) if loss is not None else output
1554
+
1555
+ return TokenClassifierOutput(
1556
+ loss=loss,
1557
+ logits=logits,
1558
+ hidden_states=transformer_outputs.hidden_states,
1559
+ attentions=transformer_outputs.attentions,
1560
+ )
1561
+
1562
+
1563
+ @add_start_docstrings(
1564
+ """
1565
+ The Falcon Model transformer with a span classification head on top for extractive question-answering tasks like
1566
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1567
+ """,
1568
+ FALCON_START_DOCSTRING,
1569
+ )
1570
+ class FalconForQuestionAnswering(FalconPreTrainedModel):
1571
+ def __init__(self, config):
1572
+ super().__init__(config)
1573
+ self.transformer = FalconModel(config)
1574
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1575
+
1576
+ # Initialize weights and apply final processing
1577
+ self.post_init()
1578
+
1579
+ @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING)
1580
+ def forward(
1581
+ self,
1582
+ input_ids: Optional[torch.LongTensor] = None,
1583
+ attention_mask: Optional[torch.FloatTensor] = None,
1584
+ head_mask: Optional[torch.FloatTensor] = None,
1585
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1586
+ start_positions: Optional[torch.LongTensor] = None,
1587
+ end_positions: Optional[torch.LongTensor] = None,
1588
+ output_attentions: Optional[bool] = None,
1589
+ output_hidden_states: Optional[bool] = None,
1590
+ return_dict: Optional[bool] = None,
1591
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1592
+ r"""
1593
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1594
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1595
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1596
+ are not taken into account for computing the loss.
1597
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1598
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1599
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1600
+ are not taken into account for computing the loss.
1601
+ """
1602
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1603
+
1604
+ outputs = self.transformer(
1605
+ input_ids,
1606
+ attention_mask=attention_mask,
1607
+ head_mask=head_mask,
1608
+ inputs_embeds=inputs_embeds,
1609
+ output_attentions=output_attentions,
1610
+ output_hidden_states=output_hidden_states,
1611
+ return_dict=return_dict,
1612
+ )
1613
+
1614
+ sequence_output = outputs[0]
1615
+
1616
+ logits = self.qa_outputs(sequence_output)
1617
+ start_logits, end_logits = logits.split(1, dim=-1)
1618
+ start_logits = start_logits.squeeze(-1).contiguous()
1619
+ end_logits = end_logits.squeeze(-1).contiguous()
1620
+
1621
+ total_loss = None
1622
+ if start_positions is not None and end_positions is not None:
1623
+ # If we are on multi-GPU, split adds a dimension
1624
+ if len(start_positions.size()) > 1:
1625
+ start_positions = start_positions.squeeze(-1)
1626
+ if len(end_positions.size()) > 1:
1627
+ end_positions = end_positions.squeeze(-1)
1628
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1629
+ ignored_index = start_logits.size(1)
1630
+ start_positions = start_positions.clamp(0, ignored_index)
1631
+ end_positions = end_positions.clamp(0, ignored_index)
1632
+
1633
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1634
+ start_loss = loss_fct(start_logits, start_positions)
1635
+ end_loss = loss_fct(end_logits, end_positions)
1636
+ total_loss = (start_loss + end_loss) / 2
1637
+
1638
+ if not return_dict:
1639
+ output = (start_logits, end_logits) + outputs[2:]
1640
+ return ((total_loss,) + output) if total_loss is not None else output
1641
+
1642
+ return QuestionAnsweringModelOutput(
1643
+ loss=total_loss,
1644
+ start_logits=start_logits,
1645
+ end_logits=end_logits,
1646
+ hidden_states=outputs.hidden_states,
1647
+ attentions=outputs.attentions,
1648
+ )
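`FalconForQuestionAnswering.forward` above splits a single two-unit projection into start and end logits and averages two cross-entropy losses, clamping out-of-range span targets so they are ignored. A minimal self-contained sketch of that loss computation (toy tensors only, not tied to any checkpoint):

```python
import torch
from torch.nn import CrossEntropyLoss

sequence_output = torch.randn(2, 8, 16)          # (batch, seq, hidden)
qa_outputs = torch.nn.Linear(16, 2)

logits = qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)          # (batch, seq)
end_logits = end_logits.squeeze(-1)

start_positions = torch.tensor([1, 20])          # 20 is out of range on purpose
end_positions = torch.tensor([3, 25])

# Out-of-range targets are clamped to seq_length and then ignored by the loss.
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
```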
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py ADDED
@@ -0,0 +1,67 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
28
+
29
+ try:
30
+ if not is_sentencepiece_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
36
+
37
+ try:
38
+ if not is_tokenizers_available():
39
+ raise OptionalDependencyNotAvailable()
40
+ except OptionalDependencyNotAvailable:
41
+ pass
42
+ else:
43
+ _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
44
+
45
+ if TYPE_CHECKING:
46
+ from .processing_layoutxlm import LayoutXLMProcessor
47
+
48
+ try:
49
+ if not is_sentencepiece_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
55
+
56
+ try:
57
+ if not is_tokenizers_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
63
+
64
+ else:
65
+ import sys
66
+
67
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
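The lazy `_import_structure` above only exposes the slow and fast tokenizers when their optional backends are installed, while the processor is always importable. A small usage-side sketch of the resulting imports, with the availability guards written out explicitly for illustration:

```python
from transformers import LayoutXLMProcessor  # always importable
from transformers.utils import is_sentencepiece_available, is_tokenizers_available

if is_sentencepiece_available():
    from transformers import LayoutXLMTokenizer      # slow tokenizer, sentencepiece backend
if is_tokenizers_available():
    from transformers import LayoutXLMTokenizerFast  # fast tokenizer, tokenizers backend
```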
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc ADDED
Binary file (7.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc ADDED
Binary file (39.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc ADDED
Binary file (27.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py ADDED
@@ -0,0 +1,200 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LayoutXLM.
17
+ """
18
+ import warnings
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class LayoutXLMProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
29
+ processor.
30
+
31
+ [`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
32
+
33
+ It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
34
+ get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
35
+ [`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
36
+ `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
37
+ into token-level `labels` for token classification tasks (such as FUNSD, CORD).
38
+
39
+ Args:
40
+ image_processor (`LayoutLMv2ImageProcessor`, *optional*):
41
+ An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
42
+ tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
43
+ An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
44
+ """
45
+
46
+ attributes = ["image_processor", "tokenizer"]
47
+ image_processor_class = "LayoutLMv2ImageProcessor"
48
+ tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
49
+
50
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
51
+ if "feature_extractor" in kwargs:
52
+ warnings.warn(
53
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
54
+ " instead.",
55
+ FutureWarning,
56
+ )
57
+ feature_extractor = kwargs.pop("feature_extractor")
58
+
59
+ image_processor = image_processor if image_processor is not None else feature_extractor
60
+ if image_processor is None:
61
+ raise ValueError("You need to specify an `image_processor`.")
62
+ if tokenizer is None:
63
+ raise ValueError("You need to specify a `tokenizer`.")
64
+
65
+ super().__init__(image_processor, tokenizer)
66
+
67
+ def __call__(
68
+ self,
69
+ images,
70
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
71
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
72
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
73
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
74
+ add_special_tokens: bool = True,
75
+ padding: Union[bool, str, PaddingStrategy] = False,
76
+ truncation: Union[bool, str, TruncationStrategy] = None,
77
+ max_length: Optional[int] = None,
78
+ stride: int = 0,
79
+ pad_to_multiple_of: Optional[int] = None,
80
+ return_token_type_ids: Optional[bool] = None,
81
+ return_attention_mask: Optional[bool] = None,
82
+ return_overflowing_tokens: bool = False,
83
+ return_special_tokens_mask: bool = False,
84
+ return_offsets_mapping: bool = False,
85
+ return_length: bool = False,
86
+ verbose: bool = True,
87
+ return_tensors: Optional[Union[str, TensorType]] = None,
88
+ **kwargs,
89
+ ) -> BatchEncoding:
90
+ """
91
+ This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
92
+ [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
93
+ bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
94
+ together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
95
+ `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
96
+ arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
97
+
98
+ Please refer to the docstring of the above two methods for more information.
99
+ """
100
+ # verify input
101
+ if self.image_processor.apply_ocr and (boxes is not None):
102
+ raise ValueError(
103
+ "You cannot provide bounding boxes "
104
+ "if you initialized the image processor with apply_ocr set to True."
105
+ )
106
+
107
+ if self.image_processor.apply_ocr and (word_labels is not None):
108
+ raise ValueError(
109
+ "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
110
+ )
111
+
112
+ if return_overflowing_tokens is True and return_offsets_mapping is False:
113
+ raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
114
+
115
+ # first, apply the image processor
116
+ features = self.image_processor(images=images, return_tensors=return_tensors)
117
+
118
+ # second, apply the tokenizer
119
+ if text is not None and self.image_processor.apply_ocr and text_pair is None:
120
+ if isinstance(text, str):
121
+ text = [text] # add batch dimension (as the image processor always adds a batch dimension)
122
+ text_pair = features["words"]
123
+
124
+ encoded_inputs = self.tokenizer(
125
+ text=text if text is not None else features["words"],
126
+ text_pair=text_pair if text_pair is not None else None,
127
+ boxes=boxes if boxes is not None else features["boxes"],
128
+ word_labels=word_labels,
129
+ add_special_tokens=add_special_tokens,
130
+ padding=padding,
131
+ truncation=truncation,
132
+ max_length=max_length,
133
+ stride=stride,
134
+ pad_to_multiple_of=pad_to_multiple_of,
135
+ return_token_type_ids=return_token_type_ids,
136
+ return_attention_mask=return_attention_mask,
137
+ return_overflowing_tokens=return_overflowing_tokens,
138
+ return_special_tokens_mask=return_special_tokens_mask,
139
+ return_offsets_mapping=return_offsets_mapping,
140
+ return_length=return_length,
141
+ verbose=verbose,
142
+ return_tensors=return_tensors,
143
+ **kwargs,
144
+ )
145
+
146
+ # add pixel values
147
+ images = features.pop("pixel_values")
148
+ if return_overflowing_tokens is True:
149
+ images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
150
+ encoded_inputs["image"] = images
151
+
152
+ return encoded_inputs
153
+
154
+ def get_overflowing_images(self, images, overflow_to_sample_mapping):
155
+ # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
156
+ images_with_overflow = []
157
+ for sample_idx in overflow_to_sample_mapping:
158
+ images_with_overflow.append(images[sample_idx])
159
+
160
+ if len(images_with_overflow) != len(overflow_to_sample_mapping):
161
+ raise ValueError(
162
+ "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
163
+ f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
164
+ )
165
+
166
+ return images_with_overflow
167
+
168
+ def batch_decode(self, *args, **kwargs):
169
+ """
170
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
171
+ refer to the docstring of this method for more information.
172
+ """
173
+ return self.tokenizer.batch_decode(*args, **kwargs)
174
+
175
+ def decode(self, *args, **kwargs):
176
+ """
177
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
178
+ to the docstring of this method for more information.
179
+ """
180
+ return self.tokenizer.decode(*args, **kwargs)
181
+
182
+ @property
183
+ def model_input_names(self):
184
+ return ["input_ids", "bbox", "attention_mask", "image"]
185
+
186
+ @property
187
+ def feature_extractor_class(self):
188
+ warnings.warn(
189
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
190
+ FutureWarning,
191
+ )
192
+ return self.image_processor_class
193
+
194
+ @property
195
+ def feature_extractor(self):
196
+ warnings.warn(
197
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
198
+ FutureWarning,
199
+ )
200
+ return self.image_processor
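A usage sketch of the processor defined above, wiring the image processor and tokenizer together by hand with `apply_ocr=False` so that words and boxes come from the caller. The checkpoint name is the public `microsoft/layoutxlm-base`; the example words, boxes and blank image are made up, and running this downloads the tokenizer and needs sentencepiece and Pillow:

```python
from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutXLMProcessor, LayoutXLMTokenizer

# OCR disabled: the caller supplies words and their 0-1000 normalized boxes.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)

image = Image.new("RGB", (1000, 1000), color="white")  # stand-in for a document scan
words = ["Invoice", "Total:", "42"]
boxes = [[100, 100, 300, 140], [100, 200, 250, 240], [260, 200, 320, 240]]

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # includes input_ids, attention_mask, bbox and image
```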
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py ADDED
@@ -0,0 +1,1174 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PreTokenizedInput,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
34
+ from ..xlm_roberta.tokenization_xlm_roberta import (
35
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
36
+ PRETRAINED_VOCAB_FILES_MAP,
37
+ SPIECE_UNDERLINE,
38
+ VOCAB_FILES_NAMES,
39
+ )
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+
45
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
46
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
47
+ Whether or not to encode the sequences with the special tokens relative to their model.
48
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
49
+ Activates and controls padding. Accepts the following values:
50
+
51
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
52
+ sequence is provided).
53
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
54
+ acceptable input length for the model if that argument is not provided.
55
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
56
+ lengths).
57
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
58
+ Activates and controls truncation. Accepts the following values:
59
+
60
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
61
+ to the maximum acceptable input length for the model if that argument is not provided. This will
62
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
63
+ sequences (or a batch of pairs) is provided.
64
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
65
+ maximum acceptable input length for the model if that argument is not provided. This will only
66
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
67
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
68
+ maximum acceptable input length for the model if that argument is not provided. This will only
69
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
70
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
71
+ greater than the model maximum admissible input size).
72
+ max_length (`int`, *optional*):
73
+ Controls the maximum length to use by one of the truncation/padding parameters.
74
+
75
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
76
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
77
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
78
+ stride (`int`, *optional*, defaults to 0):
79
+ If set to a number along with `max_length`, the overflowing tokens returned when
80
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
81
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
82
+ argument defines the number of overlapping tokens.
83
+ pad_to_multiple_of (`int`, *optional*):
84
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
85
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
86
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
87
+ If set, will return tensors instead of list of python integers. Acceptable values are:
88
+
89
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
90
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
91
+ - `'np'`: Return Numpy `np.ndarray` objects.
92
+ return_token_type_ids (`bool`, *optional*):
93
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
94
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
95
+
96
+ [What are token type IDs?](../glossary#token-type-ids)
97
+ return_attention_mask (`bool`, *optional*):
98
+ Whether to return the attention mask. If left to the default, will return the attention mask according
99
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
100
+
101
+ [What are attention masks?](../glossary#attention-mask)
102
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
103
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
104
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
105
+ of returning overflowing tokens.
106
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to return special tokens mask information.
108
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
109
+ Whether or not to return `(char_start, char_end)` for each token.
110
+
111
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
112
+ Python's tokenizer, this method will raise `NotImplementedError`.
113
+ return_length (`bool`, *optional*, defaults to `False`):
114
+ Whether or not to return the lengths of the encoded inputs.
115
+ verbose (`bool`, *optional*, defaults to `True`):
116
+ Whether or not to print more information and warnings.
117
+ **kwargs: passed to the `self.tokenize()` method
118
+
119
+ Return:
120
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
121
+
122
+ - **input_ids** -- List of token ids to be fed to a model.
123
+
124
+ [What are input IDs?](../glossary#input-ids)
125
+
126
+ - **bbox** -- List of bounding boxes to be fed to a model.
127
+
128
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
129
+ if *"token_type_ids"* is in `self.model_input_names`).
130
+
131
+ [What are token type IDs?](../glossary#token-type-ids)
132
+
133
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
134
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
135
+
136
+ [What are attention masks?](../glossary#attention-mask)
137
+
138
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
139
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
140
+ `return_overflowing_tokens=True`).
141
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
142
+ `return_overflowing_tokens=True`).
143
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
144
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
145
+ - **length** -- The length of the inputs (when `return_length=True`).
146
+ """
147
+
148
+
149
+ class LayoutXLMTokenizer(PreTrainedTokenizer):
150
+ """
151
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
152
+ [SentencePiece](https://github.com/google/sentencepiece).
153
+
154
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
155
+ this superclass for more information regarding those methods.
156
+
157
+ Args:
158
+ vocab_file (`str`):
159
+ Path to the vocabulary file.
160
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
161
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
162
+
163
+ <Tip>
164
+
165
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
166
+ sequence. The token used is the `cls_token`.
167
+
168
+ </Tip>
169
+
170
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
171
+ The end of sequence token.
172
+
173
+ <Tip>
174
+
175
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
176
+ The token used is the `sep_token`.
177
+
178
+ </Tip>
179
+
180
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
181
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
182
+ sequence classification or for a text and a question for question answering. It is also used as the last
183
+ token of a sequence built with special tokens.
184
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
185
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
186
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
187
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
188
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
189
+ token instead.
190
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
191
+ The token used for padding, for example when batching sequences of different lengths.
192
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
193
+ The token used for masking values. This is the token used when training this model with masked language
194
+ modeling. This is the token which the model will try to predict.
195
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
196
+ The bounding box to use for the special [CLS] token.
197
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
198
+ The bounding box to use for the special [SEP] token.
199
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
200
+ The bounding box to use for the special [PAD] token.
201
+ pad_token_label (`int`, *optional*, defaults to -100):
202
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
203
+ CrossEntropyLoss.
204
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
205
+ Whether or not to only label the first subword, in case word labels are provided.
206
+ sp_model_kwargs (`dict`, *optional*):
207
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
208
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
209
+ to set:
210
+
211
+ - `enable_sampling`: Enable subword regularization.
212
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
213
+
214
+ - `nbest_size = {0,1}`: No sampling is performed.
215
+ - `nbest_size > 1`: samples from the nbest_size results.
216
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
217
+ using forward-filtering-and-backward-sampling algorithm.
218
+
219
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
220
+ BPE-dropout.
221
+
222
+ Attributes:
223
+ sp_model (`SentencePieceProcessor`):
224
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
225
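+
+ Example of enabling subword regularization via `sp_model_kwargs` (an illustrative sketch; the vocabulary file name below is a placeholder):
+
+ >>> tokenizer = LayoutXLMTokenizer(
+ ... "sentencepiece.bpe.model", sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}
+ ... )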
+ """
226
+
227
+ vocab_files_names = VOCAB_FILES_NAMES
228
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
229
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
230
+ model_input_names = ["input_ids", "attention_mask"]
231
+
232
+ def __init__(
233
+ self,
234
+ vocab_file,
235
+ bos_token="<s>",
236
+ eos_token="</s>",
237
+ sep_token="</s>",
238
+ cls_token="<s>",
239
+ unk_token="<unk>",
240
+ pad_token="<pad>",
241
+ mask_token="<mask>",
242
+ cls_token_box=[0, 0, 0, 0],
243
+ sep_token_box=[1000, 1000, 1000, 1000],
244
+ pad_token_box=[0, 0, 0, 0],
245
+ pad_token_label=-100,
246
+ only_label_first_subword=True,
247
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
248
+ **kwargs,
249
+ ) -> None:
250
+ # Mask token behave like a normal word, i.e. include the space before it
251
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
252
+
253
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
254
+
255
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
256
+ self.sp_model.Load(str(vocab_file))
257
+ self.vocab_file = vocab_file
258
+
259
+ # Original fairseq vocab and spm vocab must be "aligned":
260
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
261
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
262
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
263
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
264
+
265
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
266
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
267
+
268
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
269
+ self.fairseq_offset = 1
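+ # With this offset, every spm piece id is shifted by 1 when mapped to a model id: e.g. spm id 3 (",")
+ # becomes model id 4, matching the fairseq row of the table above, while ids 0-3 stay reserved for the
+ # special tokens.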
270
+
271
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
272
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
273
+
274
+ # additional properties
275
+ self.cls_token_box = cls_token_box
276
+ self.sep_token_box = sep_token_box
277
+ self.pad_token_box = pad_token_box
278
+ self.pad_token_label = pad_token_label
279
+ self.only_label_first_subword = only_label_first_subword
280
+
281
+ super().__init__(
282
+ bos_token=bos_token,
283
+ eos_token=eos_token,
284
+ unk_token=unk_token,
285
+ sep_token=sep_token,
286
+ cls_token=cls_token,
287
+ pad_token=pad_token,
288
+ mask_token=mask_token,
289
+ cls_token_box=cls_token_box,
290
+ sep_token_box=sep_token_box,
291
+ pad_token_box=pad_token_box,
292
+ pad_token_label=pad_token_label,
293
+ only_label_first_subword=only_label_first_subword,
294
+ sp_model_kwargs=self.sp_model_kwargs,
295
+ **kwargs,
296
+ )
297
+
298
+ def __getstate__(self):
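+ # The SentencePieceProcessor object itself cannot be pickled, so drop it from the state and keep only
+ # its serialized proto; __setstate__ below rebuilds the processor from that proto.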
299
+ state = self.__dict__.copy()
300
+ state["sp_model"] = None
301
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
302
+ return state
303
+
304
+ def __setstate__(self, d):
305
+ self.__dict__ = d
306
+
307
+ # for backward compatibility
308
+ if not hasattr(self, "sp_model_kwargs"):
309
+ self.sp_model_kwargs = {}
310
+
311
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
312
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
313
+
314
+ def build_inputs_with_special_tokens(
315
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
316
+ ) -> List[int]:
317
+ """
318
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
319
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
320
+
321
+ - single sequence: `<s> X </s>`
322
+ - pair of sequences: `<s> A </s></s> B </s>`
323
+
324
+ Args:
325
+ token_ids_0 (`List[int]`):
326
+ List of IDs to which the special tokens will be added.
327
+ token_ids_1 (`List[int]`, *optional*):
328
+ Optional second list of IDs for sequence pairs.
329
+
330
+ Returns:
331
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
332
+ """
333
+
334
+ if token_ids_1 is None:
335
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
336
+ cls = [self.cls_token_id]
337
+ sep = [self.sep_token_id]
338
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
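+ # Illustrative sketch (the inner ids 10, 11 and 20 are made-up inputs; 0 and 2 are the fairseq ids of
+ # "<s>" and "</s>" defined in __init__):
+ # build_inputs_with_special_tokens([10, 11]) -> [0, 10, 11, 2] i.e. <s> X </s>
+ # build_inputs_with_special_tokens([10], [20]) -> [0, 10, 2, 2, 20, 2] i.e. <s> A </s></s> B </s>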
339
+
340
+ def get_special_tokens_mask(
341
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
342
+ ) -> List[int]:
343
+ """
344
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
345
+ special tokens using the tokenizer `prepare_for_model` method.
346
+
347
+ Args:
348
+ token_ids_0 (`List[int]`):
349
+ List of IDs.
350
+ token_ids_1 (`List[int]`, *optional*):
351
+ Optional second list of IDs for sequence pairs.
352
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
353
+ Whether or not the token list is already formatted with special tokens for the model.
354
+
355
+ Returns:
356
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
357
+ """
358
+
359
+ if already_has_special_tokens:
360
+ return super().get_special_tokens_mask(
361
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
362
+ )
363
+
364
+ if token_ids_1 is None:
365
+ return [1] + ([0] * len(token_ids_0)) + [1]
366
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
367
+
368
+ def create_token_type_ids_from_sequences(
369
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
370
+ ) -> List[int]:
371
+ """
372
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
373
+ not make use of token type ids, therefore a list of zeros is returned.
374
+
375
+ Args:
376
+ token_ids_0 (`List[int]`):
377
+ List of IDs.
378
+ token_ids_1 (`List[int]`, *optional*):
379
+ Optional second list of IDs for sequence pairs.
380
+
381
+ Returns:
382
+ `List[int]`: List of zeros.
383
+
384
+ """
385
+
386
+ sep = [self.sep_token_id]
387
+ cls = [self.cls_token_id]
388
+
389
+ if token_ids_1 is None:
390
+ return len(cls + token_ids_0 + sep) * [0]
391
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
392
+
393
+ @property
394
+ def vocab_size(self):
395
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
396
+
397
+ def get_vocab(self):
398
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
399
+ vocab.update(self.added_tokens_encoder)
400
+ return vocab
401
+
402
+ def _tokenize(self, text: str) -> List[str]:
403
+ return self.sp_model.encode(text, out_type=str)
404
+
405
+ def _convert_token_to_id(self, token):
406
+ """Converts a token (str) in an id using the vocab."""
407
+ if token in self.fairseq_tokens_to_ids:
408
+ return self.fairseq_tokens_to_ids[token]
409
+ spm_id = self.sp_model.PieceToId(token)
410
+
411
+ # Need to return unknown token if the SP model returned 0
412
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
413
+
414
+ def _convert_id_to_token(self, index):
415
+ """Converts an index (integer) in a token (str) using the vocab."""
416
+ if index in self.fairseq_ids_to_tokens:
417
+ return self.fairseq_ids_to_tokens[index]
418
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
419
+
420
+ def convert_tokens_to_string(self, tokens):
421
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
422
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
423
+ return out_string
424
+
425
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
426
+ if not os.path.isdir(save_directory):
427
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
428
+ return
429
+ out_vocab_file = os.path.join(
430
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
431
+ )
432
+
433
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
434
+ copyfile(self.vocab_file, out_vocab_file)
435
+ elif not os.path.isfile(self.vocab_file):
436
+ with open(out_vocab_file, "wb") as fi:
437
+ content_spiece_model = self.sp_model.serialized_model_proto()
438
+ fi.write(content_spiece_model)
439
+
440
+ return (out_vocab_file,)
441
+
442
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
443
+ def __call__(
444
+ self,
445
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
446
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
447
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
448
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
449
+ add_special_tokens: bool = True,
450
+ padding: Union[bool, str, PaddingStrategy] = False,
451
+ truncation: Union[bool, str, TruncationStrategy] = None,
452
+ max_length: Optional[int] = None,
453
+ stride: int = 0,
454
+ pad_to_multiple_of: Optional[int] = None,
455
+ return_tensors: Optional[Union[str, TensorType]] = None,
456
+ return_token_type_ids: Optional[bool] = None,
457
+ return_attention_mask: Optional[bool] = None,
458
+ return_overflowing_tokens: bool = False,
459
+ return_special_tokens_mask: bool = False,
460
+ return_offsets_mapping: bool = False,
461
+ return_length: bool = False,
462
+ verbose: bool = True,
463
+ **kwargs,
464
+ ) -> BatchEncoding:
465
+ """
466
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
467
+ sequences with word-level normalized bounding boxes and optional labels.
468
+
469
+ Args:
470
+ text (`str`, `List[str]`, `List[List[str]]`):
471
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
472
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
473
+ words).
474
+ text_pair (`List[str]`, `List[List[str]]`):
475
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
476
+ (pretokenized string).
477
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
478
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
479
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
480
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
481
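+
+ Example (an illustrative sketch; the checkpoint name and the word/box values below are assumptions):
+
+ >>> from transformers import LayoutXLMTokenizer
+
+ >>> tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
+ >>> words = ["hello", "world"]
+ >>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one 0-1000 normalized box per word
+ >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
+ >>> sorted(encoding.keys())
+ ['attention_mask', 'bbox', 'input_ids']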
+ """
482
+
483
+ # Input type checking for clearer error
484
+ def _is_valid_text_input(t):
485
+ if isinstance(t, str):
486
+ # Strings are fine
487
+ return True
488
+ elif isinstance(t, (list, tuple)):
489
+ # List are fine as long as they are...
490
+ if len(t) == 0:
491
+ # ... empty
492
+ return True
493
+ elif isinstance(t[0], str):
494
+ # ... list of strings
495
+ return True
496
+ elif isinstance(t[0], (list, tuple)):
497
+ # ... list with an empty list or with a list of strings
498
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
499
+ else:
500
+ return False
501
+ else:
502
+ return False
503
+
504
+ if text_pair is not None:
505
+ # in case text + text_pair are provided, text = questions, text_pair = words
506
+ if not _is_valid_text_input(text):
507
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
508
+ if not isinstance(text_pair, (list, tuple)):
509
+ raise ValueError(
510
+ "words must of type `List[str]` (single pretokenized example), "
511
+ "or `List[List[str]]` (batch of pretokenized examples)."
512
+ )
513
+ else:
514
+ # in case only text is provided => must be words
515
+ if not isinstance(text, (list, tuple)):
516
+ raise ValueError(
517
+ "Words must of type `List[str]` (single pretokenized example), "
518
+ "or `List[List[str]]` (batch of pretokenized examples)."
519
+ )
520
+
521
+ if text_pair is not None:
522
+ is_batched = isinstance(text, (list, tuple))
523
+ else:
524
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
525
+
526
+ words = text if text_pair is None else text_pair
527
+ if boxes is None:
528
+ raise ValueError("You must provide corresponding bounding boxes")
529
+ if is_batched:
530
+ if len(words) != len(boxes):
531
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
532
+ for words_example, boxes_example in zip(words, boxes):
533
+ if len(words_example) != len(boxes_example):
534
+ raise ValueError("You must provide as many words as there are bounding boxes")
535
+ else:
536
+ if len(words) != len(boxes):
537
+ raise ValueError("You must provide as many words as there are bounding boxes")
538
+
539
+ if is_batched:
540
+ if text_pair is not None and len(text) != len(text_pair):
541
+ raise ValueError(
542
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
543
+ f" {len(text_pair)}."
544
+ )
545
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
546
+ is_pair = bool(text_pair is not None)
547
+ return self.batch_encode_plus(
548
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
549
+ is_pair=is_pair,
550
+ boxes=boxes,
551
+ word_labels=word_labels,
552
+ add_special_tokens=add_special_tokens,
553
+ padding=padding,
554
+ truncation=truncation,
555
+ max_length=max_length,
556
+ stride=stride,
557
+ pad_to_multiple_of=pad_to_multiple_of,
558
+ return_tensors=return_tensors,
559
+ return_token_type_ids=return_token_type_ids,
560
+ return_attention_mask=return_attention_mask,
561
+ return_overflowing_tokens=return_overflowing_tokens,
562
+ return_special_tokens_mask=return_special_tokens_mask,
563
+ return_offsets_mapping=return_offsets_mapping,
564
+ return_length=return_length,
565
+ verbose=verbose,
566
+ **kwargs,
567
+ )
568
+ else:
569
+ return self.encode_plus(
570
+ text=text,
571
+ text_pair=text_pair,
572
+ boxes=boxes,
573
+ word_labels=word_labels,
574
+ add_special_tokens=add_special_tokens,
575
+ padding=padding,
576
+ truncation=truncation,
577
+ max_length=max_length,
578
+ stride=stride,
579
+ pad_to_multiple_of=pad_to_multiple_of,
580
+ return_tensors=return_tensors,
581
+ return_token_type_ids=return_token_type_ids,
582
+ return_attention_mask=return_attention_mask,
583
+ return_overflowing_tokens=return_overflowing_tokens,
584
+ return_special_tokens_mask=return_special_tokens_mask,
585
+ return_offsets_mapping=return_offsets_mapping,
586
+ return_length=return_length,
587
+ verbose=verbose,
588
+ **kwargs,
589
+ )
590
+
591
+ def _batch_encode_plus(
592
+ self,
593
+ batch_text_or_text_pairs: Union[
594
+ List[TextInput],
595
+ List[TextInputPair],
596
+ List[PreTokenizedInput],
597
+ ],
598
+ is_pair: bool = None,
599
+ boxes: Optional[List[List[List[int]]]] = None,
600
+ word_labels: Optional[List[List[int]]] = None,
601
+ add_special_tokens: bool = True,
602
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
603
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
604
+ max_length: Optional[int] = None,
605
+ stride: int = 0,
606
+ pad_to_multiple_of: Optional[int] = None,
607
+ return_tensors: Optional[Union[str, TensorType]] = None,
608
+ return_token_type_ids: Optional[bool] = None,
609
+ return_attention_mask: Optional[bool] = None,
610
+ return_overflowing_tokens: bool = False,
611
+ return_special_tokens_mask: bool = False,
612
+ return_offsets_mapping: bool = False,
613
+ return_length: bool = False,
614
+ verbose: bool = True,
615
+ **kwargs,
616
+ ) -> BatchEncoding:
617
+ if return_offsets_mapping:
618
+ raise NotImplementedError(
619
+ "return_offset_mapping is not available when using Python tokenizers. "
620
+ "To use this feature, change your tokenizer to one deriving from "
621
+ "transformers.PreTrainedTokenizerFast."
622
+ )
623
+
624
+ batch_outputs = self._batch_prepare_for_model(
625
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
626
+ is_pair=is_pair,
627
+ boxes=boxes,
628
+ word_labels=word_labels,
629
+ add_special_tokens=add_special_tokens,
630
+ padding_strategy=padding_strategy,
631
+ truncation_strategy=truncation_strategy,
632
+ max_length=max_length,
633
+ stride=stride,
634
+ pad_to_multiple_of=pad_to_multiple_of,
635
+ return_attention_mask=return_attention_mask,
636
+ return_token_type_ids=return_token_type_ids,
637
+ return_overflowing_tokens=return_overflowing_tokens,
638
+ return_special_tokens_mask=return_special_tokens_mask,
639
+ return_length=return_length,
640
+ return_tensors=return_tensors,
641
+ verbose=verbose,
642
+ )
643
+
644
+ return BatchEncoding(batch_outputs)
645
+
646
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
647
+ def _batch_prepare_for_model(
648
+ self,
649
+ batch_text_or_text_pairs,
650
+ is_pair: bool = None,
651
+ boxes: Optional[List[List[int]]] = None,
652
+ word_labels: Optional[List[List[int]]] = None,
653
+ add_special_tokens: bool = True,
654
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
655
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
656
+ max_length: Optional[int] = None,
657
+ stride: int = 0,
658
+ pad_to_multiple_of: Optional[int] = None,
659
+ return_tensors: Optional[str] = None,
660
+ return_token_type_ids: Optional[bool] = None,
661
+ return_attention_mask: Optional[bool] = None,
662
+ return_overflowing_tokens: bool = False,
663
+ return_special_tokens_mask: bool = False,
664
+ return_length: bool = False,
665
+ verbose: bool = True,
666
+ ) -> BatchEncoding:
667
+ """
668
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
669
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
670
+ manages a moving window (with user defined stride) for overflowing tokens.
671
+
672
+ Args:
673
+ batch_text_or_text_pairs: list of text or text pairs (pretokenized words) to prepare for the model
674
+ """
675
+
676
+ batch_outputs = {}
677
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
678
+ batch_text_or_text_pair, boxes_example = example
679
+ outputs = self.prepare_for_model(
680
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
681
+ batch_text_or_text_pair[1] if is_pair else None,
682
+ boxes_example,
683
+ word_labels=word_labels[idx] if word_labels is not None else None,
684
+ add_special_tokens=add_special_tokens,
685
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
686
+ truncation=truncation_strategy.value,
687
+ max_length=max_length,
688
+ stride=stride,
689
+ pad_to_multiple_of=None, # we pad in batch afterward
690
+ return_attention_mask=False, # we pad in batch afterward
691
+ return_token_type_ids=return_token_type_ids,
692
+ return_overflowing_tokens=return_overflowing_tokens,
693
+ return_special_tokens_mask=return_special_tokens_mask,
694
+ return_length=return_length,
695
+ return_tensors=None, # We convert the whole batch to tensors at the end
696
+ prepend_batch_axis=False,
697
+ verbose=verbose,
698
+ )
699
+
700
+ for key, value in outputs.items():
701
+ if key not in batch_outputs:
702
+ batch_outputs[key] = []
703
+ batch_outputs[key].append(value)
704
+
705
+ batch_outputs = self.pad(
706
+ batch_outputs,
707
+ padding=padding_strategy.value,
708
+ max_length=max_length,
709
+ pad_to_multiple_of=pad_to_multiple_of,
710
+ return_attention_mask=return_attention_mask,
711
+ )
712
+
713
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
714
+
715
+ return batch_outputs
716
+
717
+ def _encode_plus(
718
+ self,
719
+ text: Union[TextInput, PreTokenizedInput],
720
+ text_pair: Optional[PreTokenizedInput] = None,
721
+ boxes: Optional[List[List[int]]] = None,
722
+ word_labels: Optional[List[int]] = None,
723
+ add_special_tokens: bool = True,
724
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
725
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
726
+ max_length: Optional[int] = None,
727
+ stride: int = 0,
728
+ pad_to_multiple_of: Optional[int] = None,
729
+ return_tensors: Optional[Union[str, TensorType]] = None,
730
+ return_token_type_ids: Optional[bool] = None,
731
+ return_attention_mask: Optional[bool] = None,
732
+ return_overflowing_tokens: bool = False,
733
+ return_special_tokens_mask: bool = False,
734
+ return_offsets_mapping: bool = False,
735
+ return_length: bool = False,
736
+ verbose: bool = True,
737
+ **kwargs,
738
+ ) -> BatchEncoding:
739
+ if return_offsets_mapping:
740
+ raise NotImplementedError(
741
+ "return_offset_mapping is not available when using Python tokenizers. "
742
+ "To use this feature, change your tokenizer to one deriving from "
743
+ "transformers.PreTrainedTokenizerFast. "
744
+ "More information on available tokenizers at "
745
+ "https://github.com/huggingface/transformers/pull/2674"
746
+ )
747
+
748
+ return self.prepare_for_model(
749
+ text=text,
750
+ text_pair=text_pair,
751
+ boxes=boxes,
752
+ word_labels=word_labels,
753
+ add_special_tokens=add_special_tokens,
754
+ padding=padding_strategy.value,
755
+ truncation=truncation_strategy.value,
756
+ max_length=max_length,
757
+ stride=stride,
758
+ pad_to_multiple_of=pad_to_multiple_of,
759
+ return_tensors=return_tensors,
760
+ prepend_batch_axis=True,
761
+ return_attention_mask=return_attention_mask,
762
+ return_token_type_ids=return_token_type_ids,
763
+ return_overflowing_tokens=return_overflowing_tokens,
764
+ return_special_tokens_mask=return_special_tokens_mask,
765
+ return_length=return_length,
766
+ verbose=verbose,
767
+ )
768
+
769
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
770
+ def prepare_for_model(
771
+ self,
772
+ text: Union[TextInput, PreTokenizedInput],
773
+ text_pair: Optional[PreTokenizedInput] = None,
774
+ boxes: Optional[List[List[int]]] = None,
775
+ word_labels: Optional[List[int]] = None,
776
+ add_special_tokens: bool = True,
777
+ padding: Union[bool, str, PaddingStrategy] = False,
778
+ truncation: Union[bool, str, TruncationStrategy] = None,
779
+ max_length: Optional[int] = None,
780
+ stride: int = 0,
781
+ pad_to_multiple_of: Optional[int] = None,
782
+ return_tensors: Optional[Union[str, TensorType]] = None,
783
+ return_token_type_ids: Optional[bool] = None,
784
+ return_attention_mask: Optional[bool] = None,
785
+ return_overflowing_tokens: bool = False,
786
+ return_special_tokens_mask: bool = False,
787
+ return_offsets_mapping: bool = False,
788
+ return_length: bool = False,
789
+ verbose: bool = True,
790
+ prepend_batch_axis: bool = False,
791
+ **kwargs,
792
+ ) -> BatchEncoding:
793
+ """
794
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
795
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
796
+ (with user defined stride) for overflowing tokens.
797
+
798
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
799
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
800
+ labeled with -100, such that they will be ignored by the loss function.
801
+
802
+ Args:
803
+ text (`str`, `List[str]`, `List[List[str]]`):
804
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
805
+ text_pair (`List[str]` or `List[int]`, *optional*):
806
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
807
+ list of list of strings (words of a batch of examples).
808
+ """
809
+
810
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
811
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
812
+ padding=padding,
813
+ truncation=truncation,
814
+ max_length=max_length,
815
+ pad_to_multiple_of=pad_to_multiple_of,
816
+ verbose=verbose,
817
+ **kwargs,
818
+ )
819
+
820
+ tokens = []
821
+ pair_tokens = []
822
+ token_boxes = []
823
+ pair_token_boxes = []
824
+ labels = []
825
+
826
+ if text_pair is None:
827
+ if word_labels is None:
828
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
829
+ for word, box in zip(text, boxes):
830
+ if len(word) < 1: # skip empty words
831
+ continue
832
+ word_tokens = self.tokenize(word)
833
+ tokens.extend(word_tokens)
834
+ token_boxes.extend([box] * len(word_tokens))
835
+ else:
836
+ # CASE 2: token classification (training)
837
+ for word, box, label in zip(text, boxes, word_labels):
838
+ if len(word) < 1: # skip empty words
839
+ continue
840
+ word_tokens = self.tokenize(word)
841
+ tokens.extend(word_tokens)
842
+ token_boxes.extend([box] * len(word_tokens))
843
+ if self.only_label_first_subword:
844
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
845
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
846
+ else:
847
+ labels.extend([label] * len(word_tokens))
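+ # e.g. a word split into 3 subword tokens with word label 5 yields [5, -100, -100] when
+ # only_label_first_subword is True and [5, 5, 5] otherwise; -100 (the default pad_token_label)
+ # is ignored by PyTorch's CrossEntropyLoss.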
848
+ else:
849
+ # CASE 3: document visual question answering (inference)
850
+ # text = question
851
+ # text_pair = words
852
+ tokens = self.tokenize(text)
853
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
854
+
855
+ for word, box in zip(text_pair, boxes):
856
+ if len(word) < 1: # skip empty words
857
+ continue
858
+ word_tokens = self.tokenize(word)
859
+ pair_tokens.extend(word_tokens)
860
+ pair_token_boxes.extend([box] * len(word_tokens))
861
+
862
+ # Create ids + pair_ids
863
+ ids = self.convert_tokens_to_ids(tokens)
864
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
865
+
866
+ # Compute the total size of the returned encodings
867
+ pair = bool(pair_ids is not None)
868
+ len_ids = len(ids)
869
+ len_pair_ids = len(pair_ids) if pair else 0
870
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
871
+
872
+ # Truncation: Handle max sequence length
873
+ overflowing_tokens = []
874
+ overflowing_token_boxes = []
875
+ overflowing_labels = []
876
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
877
+ (
878
+ ids,
879
+ token_boxes,
880
+ pair_ids,
881
+ pair_token_boxes,
882
+ labels,
883
+ overflowing_tokens,
884
+ overflowing_token_boxes,
885
+ overflowing_labels,
886
+ ) = self.truncate_sequences(
887
+ ids,
888
+ token_boxes,
889
+ pair_ids=pair_ids,
890
+ pair_token_boxes=pair_token_boxes,
891
+ labels=labels,
892
+ num_tokens_to_remove=total_len - max_length,
893
+ truncation_strategy=truncation_strategy,
894
+ stride=stride,
895
+ )
896
+
897
+ if return_token_type_ids and not add_special_tokens:
898
+ raise ValueError(
899
+ "Asking to return token_type_ids while setting add_special_tokens to False "
900
+ "results in an undefined behavior. Please set add_special_tokens to True or "
901
+ "set return_token_type_ids to None."
902
+ )
903
+
904
+ # Load from model defaults
905
+ if return_token_type_ids is None:
906
+ return_token_type_ids = "token_type_ids" in self.model_input_names
907
+ if return_attention_mask is None:
908
+ return_attention_mask = "attention_mask" in self.model_input_names
909
+
910
+ encoded_inputs = {}
911
+
912
+ if return_overflowing_tokens:
913
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
914
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
915
+ encoded_inputs["overflowing_labels"] = overflowing_labels
916
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
917
+
918
+ # Add special tokens
919
+ if add_special_tokens:
920
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
921
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
922
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
923
+ if pair_token_boxes:
924
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
925
+ if labels:
926
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
927
+ else:
928
+ sequence = ids + pair_ids if pair else ids
929
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
930
+
931
+ # Build output dictionary
932
+ encoded_inputs["input_ids"] = sequence
933
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
934
+ if return_token_type_ids:
935
+ encoded_inputs["token_type_ids"] = token_type_ids
936
+ if return_special_tokens_mask:
937
+ if add_special_tokens:
938
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
939
+ else:
940
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
941
+
942
+ if labels:
943
+ encoded_inputs["labels"] = labels
944
+
945
+ # Check lengths
946
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
947
+
948
+ # Padding
949
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
950
+ encoded_inputs = self.pad(
951
+ encoded_inputs,
952
+ max_length=max_length,
953
+ padding=padding_strategy.value,
954
+ pad_to_multiple_of=pad_to_multiple_of,
955
+ return_attention_mask=return_attention_mask,
956
+ )
957
+
958
+ if return_length:
959
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
960
+
961
+ batch_outputs = BatchEncoding(
962
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
963
+ )
964
+
965
+ return batch_outputs
966
+
967
+ def truncate_sequences(
968
+ self,
969
+ ids: List[int],
970
+ token_boxes: List[List[int]],
971
+ pair_ids: Optional[List[int]] = None,
972
+ pair_token_boxes: Optional[List[List[int]]] = None,
973
+ labels: Optional[List[int]] = None,
974
+ num_tokens_to_remove: int = 0,
975
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
976
+ stride: int = 0,
977
+ ) -> Tuple[List[int], List[int], List[int]]:
978
+ """
979
+ Truncates a sequence pair in-place following the strategy.
980
+
981
+ Args:
982
+ ids (`List[int]`):
983
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
984
+ `convert_tokens_to_ids` methods.
985
+ token_boxes (`List[List[int]]`):
986
+ Bounding boxes of the first sequence.
987
+ pair_ids (`List[int]`, *optional*):
988
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
989
+ and `convert_tokens_to_ids` methods.
990
+ pair_token_boxes (`List[List[int]]`, *optional*):
991
+ Bounding boxes of the second sequence.
992
+ labels (`List[int]`, *optional*):
993
+ Labels of the first sequence (for token classification tasks).
994
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
995
+ Number of tokens to remove using the truncation strategy.
996
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
997
+ The strategy to follow for truncation. Can be:
998
+
999
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1000
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
1001
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
1002
+ batch of pairs) is provided.
1003
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1004
+ maximum acceptable input length for the model if that argument is not provided. This will only
1005
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1006
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1007
+ maximum acceptable input length for the model if that argument is not provided. This will only
1008
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1009
+ - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
1010
+ than the model maximum admissible input size).
1011
+ stride (`int`, *optional*, defaults to 0):
1012
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1013
+ sequence returned. The value of this argument defines the number of additional tokens.
1014
+
1015
+ Returns:
1016
+ `Tuple`: The truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, together with
1017
+ the lists of overflowing tokens, overflowing token boxes and overflowing labels.
1018
+ """
1019
+ if num_tokens_to_remove <= 0:
1020
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1021
+
1022
+ if not isinstance(truncation_strategy, TruncationStrategy):
1023
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1024
+
1025
+ overflowing_tokens = []
1026
+ overflowing_token_boxes = []
1027
+ overflowing_labels = []
1028
+ if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1029
+ for _ in range(num_tokens_to_remove):
1030
+ if pair_ids is None or len(ids) > len(pair_ids):
1031
+ if not overflowing_tokens:
1032
+ window_len = min(len(ids), stride + 1)
1033
+ else:
1034
+ window_len = 1
1035
+ overflowing_tokens.extend(ids[-window_len:])
1036
+ overflowing_token_boxes.extend(token_boxes[-window_len:])
1037
+ overflowing_labels.extend(labels[-window_len:])
1038
+ ids = ids[:-1]
1039
+ token_boxes = token_boxes[:-1]
1040
+ labels = labels[:-1]
1041
+ else:
1042
+ if not overflowing_tokens:
1043
+ window_len = min(len(pair_ids), stride + 1)
1044
+ else:
1045
+ window_len = 1
1046
+ overflowing_tokens.extend(pair_ids[-window_len:])
1047
+ overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
1048
+ pair_ids = pair_ids[:-1]
1049
+ pair_token_boxes = pair_token_boxes[:-1]
1050
+ elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
1051
+ if len(ids) > num_tokens_to_remove:
1052
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1053
+ overflowing_tokens = ids[-window_len:]
1054
+ overflowing_token_boxes = token_boxes[-window_len:]
1055
+ overflowing_labels = labels[-window_len:]
1056
+ ids = ids[:-num_tokens_to_remove]
1057
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1058
+ labels = labels[:-num_tokens_to_remove]
1059
+ else:
1060
+ logger.error(
1061
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1062
+ f"but the first sequence has a length {len(ids)}. "
1063
+ f"Please select another truncation strategy than {truncation_strategy}, "
1064
+ "for instance 'longest_first' or 'only_second'."
1065
+ )
1066
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1067
+ if len(pair_ids) > num_tokens_to_remove:
1068
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1069
+ overflowing_tokens = pair_ids[-window_len:]
1070
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1071
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1072
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1073
+ else:
1074
+ logger.error(
1075
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
1076
+ f"but the second sequence has a length {len(pair_ids)}. "
1077
+ f"Please select another truncation strategy than {truncation_strategy}, "
1078
+ "for instance 'longest_first' or 'only_first'."
1079
+ )
1080
+
1081
+ return (
1082
+ ids,
1083
+ token_boxes,
1084
+ pair_ids,
1085
+ pair_token_boxes,
1086
+ labels,
1087
+ overflowing_tokens,
1088
+ overflowing_token_boxes,
1089
+ overflowing_labels,
1090
+ )
1091
+
1092
+ def _pad(
1093
+ self,
1094
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1095
+ max_length: Optional[int] = None,
1096
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1097
+ pad_to_multiple_of: Optional[int] = None,
1098
+ return_attention_mask: Optional[bool] = None,
1099
+ ) -> dict:
1100
+ """
1101
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1102
+
1103
+ Args:
1104
+ encoded_inputs:
1105
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1106
+ max_length: maximum length of the returned list and optionally padding length (see below).
1107
+ Will truncate by taking into account the special tokens.
1108
+ padding_strategy: PaddingStrategy to use for padding.
1109
+
1110
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
1112
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length
1113
+ - PaddingStrategy.DO_NOT_PAD: Do not pad (default)
1113
+ The tokenizer padding sides are defined in self.padding_side:
1114
+
1115
+ - 'left': pads on the left of the sequences
1116
+ - 'right': pads on the right of the sequences
1117
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
1118
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
1119
+ `>= 7.5` (Volta).
1120
+ return_attention_mask:
1121
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1122
+ """
1123
+ # Load from model defaults
1124
+ if return_attention_mask is None:
1125
+ return_attention_mask = "attention_mask" in self.model_input_names
1126
+
1127
+ required_input = encoded_inputs[self.model_input_names[0]]
1128
+
1129
+ if padding_strategy == PaddingStrategy.LONGEST:
1130
+ max_length = len(required_input)
1131
+
1132
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1133
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
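+ # round max_length up to the next multiple, e.g. max_length=30 with pad_to_multiple_of=8 becomes 32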
1134
+
1135
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1136
+
1137
+ # Initialize attention mask if not present.
1138
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1139
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1140
+
1141
+ if needs_to_be_padded:
1142
+ difference = max_length - len(required_input)
1143
+ if self.padding_side == "right":
1144
+ if return_attention_mask:
1145
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1146
+ if "token_type_ids" in encoded_inputs:
1147
+ encoded_inputs["token_type_ids"] = (
1148
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1149
+ )
1150
+ if "bbox" in encoded_inputs:
1151
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1152
+ if "labels" in encoded_inputs:
1153
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1154
+ if "special_tokens_mask" in encoded_inputs:
1155
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1156
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1157
+ elif self.padding_side == "left":
1158
+ if return_attention_mask:
1159
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1160
+ if "token_type_ids" in encoded_inputs:
1161
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1162
+ "token_type_ids"
1163
+ ]
1164
+ if "bbox" in encoded_inputs:
1165
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1166
+ if "labels" in encoded_inputs:
1167
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1168
+ if "special_tokens_mask" in encoded_inputs:
1169
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1170
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1171
+ else:
1172
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1173
+
1174
+ return encoded_inputs
env-llmeval/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py ADDED
@@ -0,0 +1,804 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for LayoutXLM model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Dict, List, Optional, Tuple, Union
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_base import (
24
+ BatchEncoding,
25
+ EncodedInput,
26
+ PreTokenizedInput,
27
+ TextInput,
28
+ TextInputPair,
29
+ TruncationStrategy,
30
+ )
31
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
32
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
33
+ from ..xlm_roberta.tokenization_xlm_roberta_fast import (
34
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
35
+ PRETRAINED_VOCAB_FILES_MAP,
36
+ VOCAB_FILES_NAMES,
37
+ )
38
+
39
+
40
+ if is_sentencepiece_available():
41
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
42
+ else:
43
+ LayoutXLMTokenizer = None
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
49
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
50
+ Whether or not to encode the sequences with the special tokens relative to their model.
51
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
52
+ Activates and controls padding. Accepts the following values:
53
+
54
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
55
+ sequence is provided).
56
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
57
+ acceptable input length for the model if that argument is not provided.
58
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
59
+ lengths).
60
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
61
+ Activates and controls truncation. Accepts the following values:
62
+
63
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
64
+ to the maximum acceptable input length for the model if that argument is not provided. This will
65
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
66
+ sequences (or a batch of pairs) is provided.
67
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
68
+ maximum acceptable input length for the model if that argument is not provided. This will only
69
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
70
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
71
+ maximum acceptable input length for the model if that argument is not provided. This will only
72
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
73
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
74
+ greater than the model maximum admissible input size).
75
+ max_length (`int`, *optional*):
76
+ Controls the maximum length to use by one of the truncation/padding parameters.
77
+
78
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
79
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
80
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
81
+ stride (`int`, *optional*, defaults to 0):
82
+ If set to a number along with `max_length`, the overflowing tokens returned when
83
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
84
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
85
+ argument defines the number of overlapping tokens.
86
+ pad_to_multiple_of (`int`, *optional*):
87
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
88
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
89
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
90
+ If set, will return tensors instead of list of python integers. Acceptable values are:
91
+
92
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
93
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
94
+ - `'np'`: Return Numpy `np.ndarray` objects.
95
+ return_token_type_ids (`bool`, *optional*):
96
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
97
+ the specific tokenizer's default, defined by the `model_input_names` attribute.
98
+
99
+ [What are token type IDs?](../glossary#token-type-ids)
100
+ return_attention_mask (`bool`, *optional*):
101
+ Whether to return the attention mask. If left to the default, will return the attention mask according
102
+ to the specific tokenizer's default, defined by the `model_input_names` attribute.
103
+
104
+ [What are attention masks?](../glossary#attention-mask)
105
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
106
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
107
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
108
+ of returning overflowing tokens.
109
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
110
+ Whether or not to return special tokens mask information.
111
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
112
+ Whether or not to return `(char_start, char_end)` for each token.
113
+
114
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]; if using
115
+ Python's tokenizer, this method will raise `NotImplementedError`.
116
+ return_length (`bool`, *optional*, defaults to `False`):
117
+ Whether or not to return the lengths of the encoded inputs.
118
+ verbose (`bool`, *optional*, defaults to `True`):
119
+ Whether or not to print more information and warnings.
120
+ **kwargs: passed to the `self.tokenize()` method
121
+
122
+ Return:
123
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
124
+
125
+ - **input_ids** -- List of token ids to be fed to a model.
126
+
127
+ [What are input IDs?](../glossary#input-ids)
128
+
129
+ - **bbox** -- List of bounding boxes to be fed to a model.
130
+
131
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
132
+ if *"token_type_ids"* is in `self.model_input_names`).
133
+
134
+ [What are token type IDs?](../glossary#token-type-ids)
135
+
136
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
137
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
138
+
139
+ [What are attention masks?](../glossary#attention-mask)
140
+
141
+ - **labels** -- List of labels to be fed to a model (when `word_labels` is specified).
142
+ - **overflowing_tokens** -- List of overflowing token sequences (when a `max_length` is specified and
143
+ `return_overflowing_tokens=True`).
144
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
145
+ `return_overflowing_tokens=True`).
146
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
147
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
148
+ - **length** -- The length of the inputs (when `return_length=True`).
149
+ """
150
+
151
+
152
+ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
153
+ """
154
+ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
155
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
156
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
157
+
158
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
159
+ refer to this superclass for more information regarding those methods.
160
+
161
+ Args:
162
+ vocab_file (`str`):
163
+ Path to the vocabulary file.
164
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
165
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
166
+
167
+ <Tip>
168
+
169
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
170
+ sequence. The token used is the `cls_token`.
171
+
172
+ </Tip>
173
+
174
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
175
+ The end of sequence token.
176
+
177
+ <Tip>
178
+
179
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
180
+ The token used is the `sep_token`.
181
+
182
+ </Tip>
183
+
184
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
185
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
186
+ sequence classification or for a text and a question for question answering. It is also used as the last
187
+ token of a sequence built with special tokens.
188
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
189
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
190
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
191
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
192
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
193
+ token instead.
194
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
195
+ The token used for padding, for example when batching sequences of different lengths.
196
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
197
+ The token used for masking values. This is the token used when training this model with masked language
198
+ modeling. This is the token which the model will try to predict.
199
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
200
+ The bounding box to use for the special [CLS] token.
201
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
202
+ The bounding box to use for the special [SEP] token.
203
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
204
+ The bounding box to use for the special [PAD] token.
205
+ pad_token_label (`int`, *optional*, defaults to -100):
206
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
207
+ CrossEntropyLoss.
208
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
209
+ Whether or not to only label the first subword, in case word labels are provided.
210
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
211
+ Additional special tokens used by the tokenizer.
212
+ """
213
+
214
+ vocab_files_names = VOCAB_FILES_NAMES
215
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
216
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
217
+ model_input_names = ["input_ids", "attention_mask"]
218
+ slow_tokenizer_class = LayoutXLMTokenizer
219
+
220
+ def __init__(
221
+ self,
222
+ vocab_file=None,
223
+ tokenizer_file=None,
224
+ bos_token="<s>",
225
+ eos_token="</s>",
226
+ sep_token="</s>",
227
+ cls_token="<s>",
228
+ unk_token="<unk>",
229
+ pad_token="<pad>",
230
+ mask_token="<mask>",
231
+ cls_token_box=[0, 0, 0, 0],
232
+ sep_token_box=[1000, 1000, 1000, 1000],
233
+ pad_token_box=[0, 0, 0, 0],
234
+ pad_token_label=-100,
235
+ only_label_first_subword=True,
236
+ **kwargs,
237
+ ):
238
+ # Mask token behaves like a normal word, i.e. includes the space before it
239
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
240
+
241
+ super().__init__(
242
+ vocab_file,
243
+ tokenizer_file=tokenizer_file,
244
+ bos_token=bos_token,
245
+ eos_token=eos_token,
246
+ sep_token=sep_token,
247
+ cls_token=cls_token,
248
+ unk_token=unk_token,
249
+ pad_token=pad_token,
250
+ mask_token=mask_token,
251
+ cls_token_box=cls_token_box,
252
+ sep_token_box=sep_token_box,
253
+ pad_token_box=pad_token_box,
254
+ pad_token_label=pad_token_label,
255
+ only_label_first_subword=only_label_first_subword,
256
+ **kwargs,
257
+ )
258
+
259
+ self.vocab_file = vocab_file
260
+
261
+ # additional properties
262
+ self.cls_token_box = cls_token_box
263
+ self.sep_token_box = sep_token_box
264
+ self.pad_token_box = pad_token_box
265
+ self.pad_token_label = pad_token_label
266
+ self.only_label_first_subword = only_label_first_subword
267
+
268
+ @property
269
+ def can_save_slow_tokenizer(self) -> bool:
270
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
271
+
272
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
273
+ def __call__(
274
+ self,
275
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
276
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
277
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
278
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
279
+ add_special_tokens: bool = True,
280
+ padding: Union[bool, str, PaddingStrategy] = False,
281
+ truncation: Union[bool, str, TruncationStrategy] = None,
282
+ max_length: Optional[int] = None,
283
+ stride: int = 0,
284
+ pad_to_multiple_of: Optional[int] = None,
285
+ return_tensors: Optional[Union[str, TensorType]] = None,
286
+ return_token_type_ids: Optional[bool] = None,
287
+ return_attention_mask: Optional[bool] = None,
288
+ return_overflowing_tokens: bool = False,
289
+ return_special_tokens_mask: bool = False,
290
+ return_offsets_mapping: bool = False,
291
+ return_length: bool = False,
292
+ verbose: bool = True,
293
+ **kwargs,
294
+ ) -> BatchEncoding:
295
+ """
296
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
297
+ sequences with word-level normalized bounding boxes and optional labels.
298
+
299
+ Args:
300
+ text (`str`, `List[str]`, `List[List[str]]`):
301
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
302
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
303
+ words).
304
+ text_pair (`List[str]`, `List[List[str]]`):
305
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
306
+ (pretokenized string).
307
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
308
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
309
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
310
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
311
+ """
312
+
313
+ # Input type checking for clearer error
314
+ def _is_valid_text_input(t):
315
+ if isinstance(t, str):
316
+ # Strings are fine
317
+ return True
318
+ elif isinstance(t, (list, tuple)):
319
+ # Lists are fine as long as they are...
320
+ if len(t) == 0:
321
+ # ... empty
322
+ return True
323
+ elif isinstance(t[0], str):
324
+ # ... list of strings
325
+ return True
326
+ elif isinstance(t[0], (list, tuple)):
327
+ # ... list with an empty list or with a list of strings
328
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
329
+ else:
330
+ return False
331
+ else:
332
+ return False
333
+
334
+ if text_pair is not None:
335
+ # in case text + text_pair are provided, text = questions, text_pair = words
336
+ if not _is_valid_text_input(text):
337
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
338
+ if not isinstance(text_pair, (list, tuple)):
339
+ raise ValueError(
340
+ "words must be of type `List[str]` (single pretokenized example), "
341
+ "or `List[List[str]]` (batch of pretokenized examples)."
342
+ )
343
+ else:
344
+ # in case only text is provided => must be words
345
+ if not isinstance(text, (list, tuple)):
346
+ raise ValueError(
347
+ "Words must be of type `List[str]` (single pretokenized example), "
348
+ "or `List[List[str]]` (batch of pretokenized examples)."
349
+ )
350
+
351
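+ # when a text_pair is given, `text` holds the question(s), so any list/tuple means a batch;
+ # otherwise `text` holds the words themselves, so a batch is a list of lists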
+ if text_pair is not None:
352
+ is_batched = isinstance(text, (list, tuple))
353
+ else:
354
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
355
+
356
+ words = text if text_pair is None else text_pair
357
+ if boxes is None:
358
+ raise ValueError("You must provide corresponding bounding boxes")
359
+ if is_batched:
360
+ if len(words) != len(boxes):
361
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
362
+ for words_example, boxes_example in zip(words, boxes):
363
+ if len(words_example) != len(boxes_example):
364
+ raise ValueError("You must provide as many words as there are bounding boxes")
365
+ else:
366
+ if len(words) != len(boxes):
367
+ raise ValueError("You must provide as many words as there are bounding boxes")
368
+
369
+ if is_batched:
370
+ if text_pair is not None and len(text) != len(text_pair):
371
+ raise ValueError(
372
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
373
+ f" {len(text_pair)}."
374
+ )
375
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
376
+ is_pair = bool(text_pair is not None)
377
+ return self.batch_encode_plus(
378
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
379
+ is_pair=is_pair,
380
+ boxes=boxes,
381
+ word_labels=word_labels,
382
+ add_special_tokens=add_special_tokens,
383
+ padding=padding,
384
+ truncation=truncation,
385
+ max_length=max_length,
386
+ stride=stride,
387
+ pad_to_multiple_of=pad_to_multiple_of,
388
+ return_tensors=return_tensors,
389
+ return_token_type_ids=return_token_type_ids,
390
+ return_attention_mask=return_attention_mask,
391
+ return_overflowing_tokens=return_overflowing_tokens,
392
+ return_special_tokens_mask=return_special_tokens_mask,
393
+ return_offsets_mapping=return_offsets_mapping,
394
+ return_length=return_length,
395
+ verbose=verbose,
396
+ **kwargs,
397
+ )
398
+ else:
399
+ return self.encode_plus(
400
+ text=text,
401
+ text_pair=text_pair,
402
+ boxes=boxes,
403
+ word_labels=word_labels,
404
+ add_special_tokens=add_special_tokens,
405
+ padding=padding,
406
+ truncation=truncation,
407
+ max_length=max_length,
408
+ stride=stride,
409
+ pad_to_multiple_of=pad_to_multiple_of,
410
+ return_tensors=return_tensors,
411
+ return_token_type_ids=return_token_type_ids,
412
+ return_attention_mask=return_attention_mask,
413
+ return_overflowing_tokens=return_overflowing_tokens,
414
+ return_special_tokens_mask=return_special_tokens_mask,
415
+ return_offsets_mapping=return_offsets_mapping,
416
+ return_length=return_length,
417
+ verbose=verbose,
418
+ **kwargs,
419
+ )
420
+
421
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
422
+ batched_input = [(text, pair)] if pair else [text]
423
+ encodings = self._tokenizer.encode_batch(
424
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
425
+ )
426
+
427
+ return encodings[0].tokens
428
+
429
+ def _batch_encode_plus(
430
+ self,
431
+ batch_text_or_text_pairs: Union[
432
+ List[TextInput],
433
+ List[TextInputPair],
434
+ List[PreTokenizedInput],
435
+ ],
436
+ is_pair: bool = None,
437
+ boxes: Optional[List[List[List[int]]]] = None,
438
+ word_labels: Optional[List[List[int]]] = None,
439
+ add_special_tokens: bool = True,
440
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
441
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
442
+ max_length: Optional[int] = None,
443
+ stride: int = 0,
444
+ pad_to_multiple_of: Optional[int] = None,
445
+ return_tensors: Optional[str] = None,
446
+ return_token_type_ids: Optional[bool] = None,
447
+ return_attention_mask: Optional[bool] = None,
448
+ return_overflowing_tokens: bool = False,
449
+ return_special_tokens_mask: bool = False,
450
+ return_offsets_mapping: bool = False,
451
+ return_length: bool = False,
452
+ verbose: bool = True,
453
+ **kwargs,
454
+ ) -> BatchEncoding:
455
+ if not isinstance(batch_text_or_text_pairs, list):
456
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
457
+
458
+ # Set the truncation and padding strategy and restore the initial configuration
459
+ self.set_truncation_and_padding(
460
+ padding_strategy=padding_strategy,
461
+ truncation_strategy=truncation_strategy,
462
+ max_length=max_length,
463
+ stride=stride,
464
+ pad_to_multiple_of=pad_to_multiple_of,
465
+ )
466
+
467
+ if is_pair:
468
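+ # the question (text) is whitespace-split here because the backend tokenizer is called with is_pretokenized=True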
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
469
+
470
+ encodings = self._tokenizer.encode_batch(
471
+ batch_text_or_text_pairs,
472
+ add_special_tokens=add_special_tokens,
473
+ is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
474
+ )
475
+
476
+ # Convert encoding to dict
477
+ # `Tokens` has type: Tuple[
478
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
479
+ # List[EncodingFast]
480
+ # ]
481
+ # with nested dimensions corresponding to batch, overflows, sequence length
482
+ tokens_and_encodings = [
483
+ self._convert_encoding(
484
+ encoding=encoding,
485
+ return_token_type_ids=return_token_type_ids,
486
+ return_attention_mask=return_attention_mask,
487
+ return_overflowing_tokens=return_overflowing_tokens,
488
+ return_special_tokens_mask=return_special_tokens_mask,
489
+ return_offsets_mapping=True
490
+ if word_labels is not None
491
+ else return_offsets_mapping, # we use offsets to create the labels
492
+ return_length=return_length,
493
+ verbose=verbose,
494
+ )
495
+ for encoding in encodings
496
+ ]
497
+
498
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
499
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
500
+ # (we say ~ because the number of overflow varies with the example in the batch)
501
+ #
502
+ # To match each overflowing sample with the original sample in the batch
503
+ # we add an overflow_to_sample_mapping array (see below)
504
+ sanitized_tokens = {}
505
+ for key in tokens_and_encodings[0][0].keys():
506
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
507
+ sanitized_tokens[key] = stack
508
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
509
+
510
+ # If returning overflowing tokens, we need to return a mapping
511
+ # from the batch idx to the original sample
512
+ if return_overflowing_tokens:
513
+ overflow_to_sample_mapping = []
514
+ for i, (toks, _) in enumerate(tokens_and_encodings):
515
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
516
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
517
+
518
+ for input_ids in sanitized_tokens["input_ids"]:
519
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
520
+
521
+ # create the token boxes
522
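+ # each token is assigned the bounding box of the word it comes from; in the pair (question + words) case,
+ # question tokens get the padding box, and special tokens get the dedicated [CLS]/[SEP]/[PAD] boxes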
+ token_boxes = []
523
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
524
+ if return_overflowing_tokens:
525
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
526
+ else:
527
+ original_index = batch_index
528
+ token_boxes_example = []
529
+ for id, sequence_id, word_id in zip(
530
+ sanitized_tokens["input_ids"][batch_index],
531
+ sanitized_encodings[batch_index].sequence_ids,
532
+ sanitized_encodings[batch_index].word_ids,
533
+ ):
534
+ if word_id is not None:
535
+ if is_pair and sequence_id == 0:
536
+ token_boxes_example.append(self.pad_token_box)
537
+ else:
538
+ token_boxes_example.append(boxes[original_index][word_id])
539
+ else:
540
+ if id == self.cls_token_id:
541
+ token_boxes_example.append(self.cls_token_box)
542
+ elif id == self.sep_token_id:
543
+ token_boxes_example.append(self.sep_token_box)
544
+ elif id == self.pad_token_id:
545
+ token_boxes_example.append(self.pad_token_box)
546
+ else:
547
+ raise ValueError("Id not recognized")
548
+ token_boxes.append(token_boxes_example)
549
+
550
+ sanitized_tokens["bbox"] = token_boxes
551
+
552
+ # optionally, create the labels
553
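+ # when only_label_first_subword is True, only the first subword of each word (offset starting at 0) keeps the
+ # real label; every other token gets pad_token_label (-100 by default) so it can be ignored by the loss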
+ if word_labels is not None:
554
+ labels = []
555
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
556
+ if return_overflowing_tokens:
557
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
558
+ else:
559
+ original_index = batch_index
560
+ labels_example = []
561
+ for id, offset, word_id in zip(
562
+ sanitized_tokens["input_ids"][batch_index],
563
+ sanitized_tokens["offset_mapping"][batch_index],
564
+ sanitized_encodings[batch_index].word_ids,
565
+ ):
566
+ if word_id is not None:
567
+ if self.only_label_first_subword:
568
+ if offset[0] == 0:
569
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
570
+ labels_example.append(word_labels[original_index][word_id])
571
+ else:
572
+ labels_example.append(self.pad_token_label)
573
+ else:
574
+ labels_example.append(word_labels[original_index][word_id])
575
+ else:
576
+ labels_example.append(self.pad_token_label)
577
+ labels.append(labels_example)
578
+
579
+ sanitized_tokens["labels"] = labels
580
+ # finally, remove offsets if the user didn't want them
581
+ if not return_offsets_mapping:
582
+ del sanitized_tokens["offset_mapping"]
583
+
584
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
585
+
586
+ def _encode_plus(
587
+ self,
588
+ text: Union[TextInput, PreTokenizedInput],
589
+ text_pair: Optional[PreTokenizedInput] = None,
590
+ boxes: Optional[List[List[int]]] = None,
591
+ word_labels: Optional[List[int]] = None,
592
+ add_special_tokens: bool = True,
593
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
594
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
595
+ max_length: Optional[int] = None,
596
+ stride: int = 0,
597
+ pad_to_multiple_of: Optional[int] = None,
598
+ return_tensors: Optional[bool] = None,
599
+ return_token_type_ids: Optional[bool] = None,
600
+ return_attention_mask: Optional[bool] = None,
601
+ return_overflowing_tokens: bool = False,
602
+ return_special_tokens_mask: bool = False,
603
+ return_offsets_mapping: bool = False,
604
+ return_length: bool = False,
605
+ verbose: bool = True,
606
+ **kwargs,
607
+ ) -> BatchEncoding:
608
+ # make it a batched input
609
+ # 2 options:
610
+ # 1) only text, in which case text must be a list of str
611
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
612
+ batched_input = [(text, text_pair)] if text_pair else [text]
613
+ batched_boxes = [boxes]
614
+ batched_word_labels = [word_labels] if word_labels is not None else None
615
+ batched_output = self._batch_encode_plus(
616
+ batched_input,
617
+ is_pair=bool(text_pair is not None),
618
+ boxes=batched_boxes,
619
+ word_labels=batched_word_labels,
620
+ add_special_tokens=add_special_tokens,
621
+ padding_strategy=padding_strategy,
622
+ truncation_strategy=truncation_strategy,
623
+ max_length=max_length,
624
+ stride=stride,
625
+ pad_to_multiple_of=pad_to_multiple_of,
626
+ return_tensors=return_tensors,
627
+ return_token_type_ids=return_token_type_ids,
628
+ return_attention_mask=return_attention_mask,
629
+ return_overflowing_tokens=return_overflowing_tokens,
630
+ return_special_tokens_mask=return_special_tokens_mask,
631
+ return_offsets_mapping=return_offsets_mapping,
632
+ return_length=return_length,
633
+ verbose=verbose,
634
+ **kwargs,
635
+ )
636
+
637
+ # If return_tensors is None, we can remove the leading batch axis
638
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
639
+ if return_tensors is None and not return_overflowing_tokens:
640
+ batched_output = BatchEncoding(
641
+ {
642
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
643
+ for key, value in batched_output.items()
644
+ },
645
+ batched_output.encodings,
646
+ )
647
+
648
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
649
+
650
+ return batched_output
651
+
652
+ def _pad(
653
+ self,
654
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
655
+ max_length: Optional[int] = None,
656
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
657
+ pad_to_multiple_of: Optional[int] = None,
658
+ return_attention_mask: Optional[bool] = None,
659
+ ) -> dict:
660
+ """
661
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
662
+
663
+ Args:
664
+ encoded_inputs:
665
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
666
+ max_length: maximum length of the returned list and optionally padding length (see below).
667
+ Will truncate by taking into account the special tokens.
668
+ padding_strategy: PaddingStrategy to use for padding.
669
+
670
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
671
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
672
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
673
+ The tokenizer padding sides are defined in self.padding_side:
674
+
675
+ - 'left': pads on the left of the sequences
676
+ - 'right': pads on the right of the sequences
677
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
678
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
679
+ `>= 7.5` (Volta).
680
+ return_attention_mask:
681
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
682
+ """
683
+ # Load from model defaults
684
+ if return_attention_mask is None:
685
+ return_attention_mask = "attention_mask" in self.model_input_names
686
+
687
+ required_input = encoded_inputs[self.model_input_names[0]]
688
+
689
+ if padding_strategy == PaddingStrategy.LONGEST:
690
+ max_length = len(required_input)
691
+
692
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
693
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
694
+
695
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
696
+
697
+ # Initialize attention mask if not present.
698
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
699
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
700
+
701
+ if needs_to_be_padded:
702
+ difference = max_length - len(required_input)
703
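+ # besides input_ids / attention_mask, the LayoutXLM-specific keys are padded as well:
+ # `bbox` with pad_token_box and `labels` with pad_token_label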
+ if self.padding_side == "right":
704
+ if return_attention_mask:
705
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
706
+ if "token_type_ids" in encoded_inputs:
707
+ encoded_inputs["token_type_ids"] = (
708
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
709
+ )
710
+ if "bbox" in encoded_inputs:
711
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
712
+ if "labels" in encoded_inputs:
713
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
714
+ if "special_tokens_mask" in encoded_inputs:
715
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
716
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
717
+ elif self.padding_side == "left":
718
+ if return_attention_mask:
719
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
720
+ if "token_type_ids" in encoded_inputs:
721
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
722
+ "token_type_ids"
723
+ ]
724
+ if "bbox" in encoded_inputs:
725
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
726
+ if "labels" in encoded_inputs:
727
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
728
+ if "special_tokens_mask" in encoded_inputs:
729
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
730
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
731
+ else:
732
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
733
+
734
+ return encoded_inputs
735
+
736
+ def build_inputs_with_special_tokens(
737
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
738
+ ) -> List[int]:
739
+ """
740
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
741
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
742
+
743
+ - single sequence: `<s> X </s>`
744
+ - pair of sequences: `<s> A </s></s> B </s>`
745
+
746
+ Args:
747
+ token_ids_0 (`List[int]`):
748
+ List of IDs to which the special tokens will be added.
749
+ token_ids_1 (`List[int]`, *optional*):
750
+ Optional second list of IDs for sequence pairs.
751
+
752
+ Returns:
753
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
754
+ """
755
+
756
+ if token_ids_1 is None:
757
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
758
+ cls = [self.cls_token_id]
759
+ sep = [self.sep_token_id]
760
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
761
+
762
+ def create_token_type_ids_from_sequences(
763
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
764
+ ) -> List[int]:
765
+ """
766
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
767
+ not make use of token type ids, therefore a list of zeros is returned.
768
+
769
+ Args:
770
+ token_ids_0 (`List[int]`):
771
+ List of IDs.
772
+ token_ids_1 (`List[int]`, *optional*):
773
+ Optional second list of IDs for sequence pairs.
774
+
775
+ Returns:
776
+ `List[int]`: List of zeros.
777
+
778
+ """
779
+
780
+ sep = [self.sep_token_id]
781
+ cls = [self.cls_token_id]
782
+
783
+ if token_ids_1 is None:
784
+ return len(cls + token_ids_0 + sep) * [0]
785
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
786
+
787
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
788
+ if not self.can_save_slow_tokenizer:
789
+ raise ValueError(
790
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
791
+ "tokenizer."
792
+ )
793
+
794
+ if not os.path.isdir(save_directory):
795
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
796
+ return
797
+ out_vocab_file = os.path.join(
798
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
799
+ )
800
+
801
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
802
+ copyfile(self.vocab_file, out_vocab_file)
803
+
804
+ return (out_vocab_file,)
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
17
+
18
+
19
+ _import_structure = {"configuration_levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig", "LevitOnnxConfig"]}
20
+
21
+ try:
22
+ if not is_vision_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["feature_extraction_levit"] = ["LevitFeatureExtractor"]
28
+ _import_structure["image_processing_levit"] = ["LevitImageProcessor"]
29
+
30
+ try:
31
+ if not is_torch_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["modeling_levit"] = [
37
+ "LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
38
+ "LevitForImageClassification",
39
+ "LevitForImageClassificationWithTeacher",
40
+ "LevitModel",
41
+ "LevitPreTrainedModel",
42
+ ]
43
+
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig, LevitOnnxConfig
47
+
48
+ try:
49
+ if not is_vision_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .feature_extraction_levit import LevitFeatureExtractor
55
+ from .image_processing_levit import LevitImageProcessor
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_levit import (
64
+ LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ LevitForImageClassification,
66
+ LevitForImageClassificationWithTeacher,
67
+ LevitModel,
68
+ LevitPreTrainedModel,
69
+ )
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.28 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/configuration_levit.cpython-310.pyc ADDED
Binary file (5.43 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/convert_levit_timm_to_pytorch.cpython-310.pyc ADDED
Binary file (4.33 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/feature_extraction_levit.cpython-310.pyc ADDED
Binary file (1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/image_processing_levit.cpython-310.pyc ADDED
Binary file (14.1 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/__pycache__/modeling_levit.cpython-310.pyc ADDED
Binary file (21.4 kB).
 
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/configuration_levit.py ADDED
@@ -0,0 +1,146 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LeViT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
30
+ "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
31
+ # See all LeViT models at https://huggingface.co/models?filter=levit
32
+ }
33
+
34
+
35
+ class LevitConfig(PretrainedConfig):
36
+ r"""
37
+ This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
38
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
39
+ defaults will yield a similar configuration to that of the LeViT
40
+ [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.
41
+
42
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
43
+ documentation from [`PretrainedConfig`] for more information.
44
+
45
+ Args:
46
+ image_size (`int`, *optional*, defaults to 224):
47
+ The size of the input image.
48
+ num_channels (`int`, *optional*, defaults to 3):
49
+ Number of channels in the input image.
50
+ kernel_size (`int`, *optional*, defaults to 3):
51
+ The kernel size for the initial convolution layers of patch embedding.
52
+ stride (`int`, *optional*, defaults to 2):
53
+ The stride size for the initial convolution layers of patch embedding.
54
+ padding (`int`, *optional*, defaults to 1):
55
+ The padding size for the initial convolution layers of patch embedding.
56
+ patch_size (`int`, *optional*, defaults to 16):
57
+ The patch size for embeddings.
58
+ hidden_sizes (`List[int]`, *optional*, defaults to `[128, 256, 384]`):
59
+ Dimension of each of the encoder blocks.
60
+ num_attention_heads (`List[int]`, *optional*, defaults to `[4, 8, 12]`):
61
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
62
+ depths (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
63
+ The number of layers in each encoder block.
64
+ key_dim (`List[int]`, *optional*, defaults to `[16, 16, 16]`):
65
+ The size of key in each of the encoder blocks.
66
+ drop_path_rate (`int`, *optional*, defaults to 0):
67
+ The dropout probability for stochastic depths, used in the blocks of the Transformer encoder.
68
+ mlp_ratios (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
69
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
70
+ encoder blocks.
71
+ attention_ratios (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
72
+ Ratio of the size of the output dimension compared to input dimension of attention layers.
73
+ initializer_range (`float`, *optional*, defaults to 0.02):
74
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import LevitConfig, LevitModel
80
+
81
+ >>> # Initializing a LeViT levit-128S style configuration
82
+ >>> configuration = LevitConfig()
83
+
84
+ >>> # Initializing a model (with random weights) from the levit-128S style configuration
85
+ >>> model = LevitModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "levit"
92
+
93
+ def __init__(
94
+ self,
95
+ image_size=224,
96
+ num_channels=3,
97
+ kernel_size=3,
98
+ stride=2,
99
+ padding=1,
100
+ patch_size=16,
101
+ hidden_sizes=[128, 256, 384],
102
+ num_attention_heads=[4, 8, 12],
103
+ depths=[4, 4, 4],
104
+ key_dim=[16, 16, 16],
105
+ drop_path_rate=0,
106
+ mlp_ratio=[2, 2, 2],
107
+ attention_ratio=[2, 2, 2],
108
+ initializer_range=0.02,
109
+ **kwargs,
110
+ ):
111
+ super().__init__(**kwargs)
112
+ self.image_size = image_size
113
+ self.num_channels = num_channels
114
+ self.kernel_size = kernel_size
115
+ self.stride = stride
116
+ self.padding = padding
117
+ self.hidden_sizes = hidden_sizes
118
+ self.num_attention_heads = num_attention_heads
119
+ self.depths = depths
120
+ self.key_dim = key_dim
121
+ self.drop_path_rate = drop_path_rate
122
+ self.patch_size = patch_size
123
+ self.attention_ratio = attention_ratio
124
+ self.mlp_ratio = mlp_ratio
125
+ self.initializer_range = initializer_range
126
+ self.down_ops = [
127
+ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
128
+ ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
129
+ ]
130
+
131
+
132
+ # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
133
+ class LevitOnnxConfig(OnnxConfig):
134
+ torch_onnx_minimum_version = version.parse("1.11")
135
+
136
+ @property
137
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
138
+ return OrderedDict(
139
+ [
140
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
141
+ ]
142
+ )
143
+
144
+ @property
145
+ def atol_for_validation(self) -> float:
146
+ return 1e-4
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/convert_levit_timm_to_pytorch.py ADDED
@@ -0,0 +1,181 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert LeViT checkpoints from timm."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from collections import OrderedDict
21
+ from functools import partial
22
+ from pathlib import Path
23
+
24
+ import timm
25
+ import torch
26
+ from huggingface_hub import hf_hub_download
27
+
28
+ from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
29
+ from transformers.utils import logging
30
+
31
+
32
+ logging.set_verbosity_info()
33
+ logger = logging.get_logger()
34
+
35
+
36
+ def convert_weight_and_push(
37
+ hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
38
+ ):
39
+ print(f"Converting {name}...")
40
+
41
+ with torch.no_grad():
42
+ if hidden_sizes == 128:
43
+ if name[-1] == "S":
44
+ from_model = timm.create_model("levit_128s", pretrained=True)
45
+ else:
46
+ from_model = timm.create_model("levit_128", pretrained=True)
47
+ if hidden_sizes == 192:
48
+ from_model = timm.create_model("levit_192", pretrained=True)
49
+ if hidden_sizes == 256:
50
+ from_model = timm.create_model("levit_256", pretrained=True)
51
+ if hidden_sizes == 384:
52
+ from_model = timm.create_model("levit_384", pretrained=True)
53
+
54
+ from_model.eval()
55
+ our_model = LevitForImageClassificationWithTeacher(config).eval()
56
+ huggingface_weights = OrderedDict()
57
+
58
+ weights = from_model.state_dict()
59
+ og_keys = list(from_model.state_dict().keys())
60
+ new_keys = list(our_model.state_dict().keys())
61
+ print(len(og_keys), len(new_keys))
62
+ for i in range(len(og_keys)):
63
+ huggingface_weights[new_keys[i]] = weights[og_keys[i]]
64
+ our_model.load_state_dict(huggingface_weights)
65
+
66
+ x = torch.randn((2, 3, 224, 224))
67
+ out1 = from_model(x)
68
+ out2 = our_model(x).logits
69
+
70
+ assert torch.allclose(out1, out2), "The model logits don't match the original one."
71
+
72
+ checkpoint_name = name
73
+ print(checkpoint_name)
74
+
75
+ if push_to_hub:
76
+ our_model.save_pretrained(save_directory / checkpoint_name)
77
+ image_processor = LevitImageProcessor()
78
+ image_processor.save_pretrained(save_directory / checkpoint_name)
79
+
80
+ print(f"Pushed {checkpoint_name}")
81
+
82
+
83
+ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
84
+ filename = "imagenet-1k-id2label.json"
85
+ num_labels = 1000
86
+ expected_shape = (1, num_labels)
87
+
88
+ repo_id = "huggingface/label-files"
89
+ num_labels = num_labels
90
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
91
+ id2label = {int(k): v for k, v in id2label.items()}
92
+
93
+ id2label = id2label
94
+ label2id = {v: k for k, v in id2label.items()}
95
+
96
+ ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
97
+
98
+ names_to_hidden_sizes = {
99
+ "levit-128S": 128,
100
+ "levit-128": 128,
101
+ "levit-192": 192,
102
+ "levit-256": 256,
103
+ "levit-384": 384,
104
+ }
105
+
106
+ names_to_config = {
107
+ "levit-128S": ImageNetPreTrainedConfig(
108
+ hidden_sizes=[128, 256, 384],
109
+ num_attention_heads=[4, 6, 8],
110
+ depths=[2, 3, 4],
111
+ key_dim=[16, 16, 16],
112
+ drop_path_rate=0,
113
+ ),
114
+ "levit-128": ImageNetPreTrainedConfig(
115
+ hidden_sizes=[128, 256, 384],
116
+ num_attention_heads=[4, 8, 12],
117
+ depths=[4, 4, 4],
118
+ key_dim=[16, 16, 16],
119
+ drop_path_rate=0,
120
+ ),
121
+ "levit-192": ImageNetPreTrainedConfig(
122
+ hidden_sizes=[192, 288, 384],
123
+ num_attention_heads=[3, 5, 6],
124
+ depths=[4, 4, 4],
125
+ key_dim=[32, 32, 32],
126
+ drop_path_rate=0,
127
+ ),
128
+ "levit-256": ImageNetPreTrainedConfig(
129
+ hidden_sizes=[256, 384, 512],
130
+ num_attention_heads=[4, 6, 8],
131
+ depths=[4, 4, 4],
132
+ key_dim=[32, 32, 32],
133
+ drop_path_rate=0,
134
+ ),
135
+ "levit-384": ImageNetPreTrainedConfig(
136
+ hidden_sizes=[384, 512, 768],
137
+ num_attention_heads=[6, 9, 12],
138
+ depths=[4, 4, 4],
139
+ key_dim=[32, 32, 32],
140
+ drop_path_rate=0.1,
141
+ ),
142
+ }
143
+
144
+ if model_name:
145
+ convert_weight_and_push(
146
+ names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
147
+ )
148
+ else:
149
+ for model_name, config in names_to_config.items():
150
+ convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
151
+ return config, expected_shape
152
+
153
+
154
+ if __name__ == "__main__":
155
+ parser = argparse.ArgumentParser()
156
+ # Required parameters
157
+ parser.add_argument(
158
+ "--model_name",
159
+ default=None,
160
+ type=str,
161
+ help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
162
+ )
163
+ parser.add_argument(
164
+ "--pytorch_dump_folder_path",
165
+ default="levit-dump-folder/",
166
+ type=Path,
167
+ required=False,
168
+ help="Path to the output PyTorch model directory.",
169
+ )
170
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
171
+ parser.add_argument(
172
+ "--no-push_to_hub",
173
+ dest="push_to_hub",
174
+ action="store_false",
175
+ help="Do not push model and image processor to the hub",
176
+ )
177
+
178
+ args = parser.parse_args()
179
+ pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
180
+ pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
181
+ convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/feature_extraction_levit.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for LeViT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_levit import LevitImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class LevitFeatureExtractor(LevitImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use LevitImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/image_processing_levit.py ADDED
@@ -0,0 +1,325 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for LeViT."""
16
+
17
+ from typing import Dict, Iterable, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ get_resize_output_image_size,
24
+ resize,
25
+ to_channel_dimension_format,
26
+ )
27
+ from ...image_utils import (
28
+ IMAGENET_DEFAULT_MEAN,
29
+ IMAGENET_DEFAULT_STD,
30
+ ChannelDimension,
31
+ ImageInput,
32
+ PILImageResampling,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_kwargs,
39
+ validate_preprocess_arguments,
40
+ )
41
+ from ...utils import TensorType, logging
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+
47
+ class LevitImageProcessor(BaseImageProcessor):
48
+ r"""
49
+ Constructs a LeViT image processor.
50
+
51
+ Args:
52
+ do_resize (`bool`, *optional*, defaults to `True`):
53
+ Wwhether to resize the shortest edge of the input to int(256/224 *`size`). Can be overridden by the
54
+ `do_resize` parameter in the `preprocess` method.
55
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
56
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image will
57
+ be resized to `(size["height"], size["width"])`. If size is a dict with key "shortest_edge", the shortest
58
+ edge value `c` is rescaled to `int(c * (256/224))`. The smaller edge of the image will be matched to this
59
+ value i.e, if height > width, then image will be rescaled to `(size["shortest_egde"] * height / width,
60
+ size["shortest_egde"])`. Can be overridden by the `size` parameter in the `preprocess` method.
61
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
62
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
63
+ `preprocess` method.
64
+ do_center_crop (`bool`, *optional*, defaults to `True`):
65
+ Whether or not to center crop the input to `(crop_size["height"], crop_size["width"])`. Can be overridden
66
+ by the `do_center_crop` parameter in the `preprocess` method.
67
+ crop_size (`Dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
68
+ Desired image size after `center_crop`. Can be overridden by the `crop_size` parameter in the `preprocess`
69
+ method.
70
+ do_rescale (`bool`, *optional*, defaults to `True`):
71
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
72
+ `do_rescale` parameter in the `preprocess` method.
73
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
74
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
75
+ `preprocess` method.
76
+ do_normalize (`bool`, *optional*, defaults to `True`):
77
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
78
+ `preprocess` method.
79
+ image_mean (`List[int]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
80
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
81
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
82
+ image_std (`List[int]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
83
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
84
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
85
+ """
86
+
87
+ model_input_names = ["pixel_values"]
88
+
89
+ def __init__(
90
+ self,
91
+ do_resize: bool = True,
92
+ size: Dict[str, int] = None,
93
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
94
+ do_center_crop: bool = True,
95
+ crop_size: Dict[str, int] = None,
96
+ do_rescale: bool = True,
97
+ rescale_factor: Union[int, float] = 1 / 255,
98
+ do_normalize: bool = True,
99
+ image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
100
+ image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
101
+ **kwargs,
102
+ ) -> None:
103
+ super().__init__(**kwargs)
104
+ size = size if size is not None else {"shortest_edge": 224}
105
+ size = get_size_dict(size, default_to_square=False)
106
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
107
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
108
+
109
+ self.do_resize = do_resize
110
+ self.size = size
111
+ self.resample = resample
112
+ self.do_center_crop = do_center_crop
113
+ self.crop_size = crop_size
114
+ self.do_rescale = do_rescale
115
+ self.rescale_factor = rescale_factor
116
+ self.do_normalize = do_normalize
117
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
118
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
119
+ self._valid_processor_keys = [
120
+ "images",
121
+ "do_resize",
122
+ "size",
123
+ "resample",
124
+ "do_center_crop",
125
+ "crop_size",
126
+ "do_rescale",
127
+ "rescale_factor",
128
+ "do_normalize",
129
+ "image_mean",
130
+ "image_std",
131
+ "return_tensors",
132
+ "data_format",
133
+ "input_data_format",
134
+ ]
135
+
136
+ def resize(
137
+ self,
138
+ image: np.ndarray,
139
+ size: Dict[str, int],
140
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
141
+ data_format: Optional[Union[str, ChannelDimension]] = None,
142
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
143
+ **kwargs,
144
+ ) -> np.ndarray:
145
+ """
146
+ Resize an image.
147
+
148
+ If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
149
+ size["width"])`.
150
+
151
+ If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
152
+ The smaller edge of the image will be matched to this value i.e, if height > width, then image will be rescaled
153
+ to `(size["shortest_egde"] * height / width, size["shortest_egde"])`.
154
+
155
+ Args:
156
+ image (`np.ndarray`):
157
+ Image to resize.
158
+ size (`Dict[str, int]`):
159
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
160
+ will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
161
+ `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
162
+ i.e, if height > width, then image will be rescaled to (size * height / width, size).
163
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
164
+ Resampling filter to use when resiizing the image.
165
+ data_format (`str` or `ChannelDimension`, *optional*):
166
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
167
+ input_data_format (`ChannelDimension` or `str`, *optional*):
168
+ The channel dimension format of the input image. If not provided, it will be inferred.
169
+ """
170
+ size_dict = get_size_dict(size, default_to_square=False)
171
+ # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
172
+ if "shortest_edge" in size:
173
+ shortest_edge = int((256 / 224) * size["shortest_edge"])
174
+ output_size = get_resize_output_image_size(
175
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
176
+ )
177
+ size_dict = {"height": output_size[0], "width": output_size[1]}
178
+ if "height" not in size_dict or "width" not in size_dict:
179
+ raise ValueError(
180
+ f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
181
+ )
182
+ return resize(
183
+ image,
184
+ size=(size_dict["height"], size_dict["width"]),
185
+ resample=resample,
186
+ data_format=data_format,
187
+ input_data_format=input_data_format,
188
+ **kwargs,
189
+ )
190
+
191
+ def preprocess(
192
+ self,
193
+ images: ImageInput,
194
+ do_resize: Optional[bool] = None,
195
+ size: Optional[Dict[str, int]] = None,
196
+ resample: PILImageResampling = None,
197
+ do_center_crop: Optional[bool] = None,
198
+ crop_size: Optional[Dict[str, int]] = None,
199
+ do_rescale: Optional[bool] = None,
200
+ rescale_factor: Optional[float] = None,
201
+ do_normalize: Optional[bool] = None,
202
+ image_mean: Optional[Union[float, Iterable[float]]] = None,
203
+ image_std: Optional[Union[float, Iterable[float]]] = None,
204
+ return_tensors: Optional[TensorType] = None,
205
+ data_format: ChannelDimension = ChannelDimension.FIRST,
206
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
207
+ **kwargs,
208
+ ) -> BatchFeature:
209
+ """
210
+ Preprocess an image or batch of images to be used as input to a LeViT model.
211
+
212
+ Args:
213
+ images (`ImageInput`):
214
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
215
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
216
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
217
+ Whether to resize the image.
218
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
219
+ Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
220
+ will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge value
221
+ `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value
222
+ i.e, if height > width, then image will be rescaled to (size * height / width, size).
223
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
224
+ Resampling filter to use when resiizing the image.
225
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
226
+ Whether to center crop the image.
227
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
228
+ Size of the output image after center cropping. Crops images to (crop_size["height"],
229
+ crop_size["width"]).
230
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
231
+ Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1.
232
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
233
+ Factor to rescale the image pixel values by.
234
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
235
+ Whether to normalize the image pixel values by `image_mean` and `image_std`.
236
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
237
+ Mean to normalize the image pixel values by.
238
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
239
+ Standard deviation to normalize the image pixel values by.
240
+ return_tensors (`str` or `TensorType`, *optional*):
241
+ The type of tensors to return. Can be one of:
242
+ - Unset: Return a list of `np.ndarray`.
243
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
244
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
245
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
246
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
247
+ data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
248
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
249
+ image is used. Can be one of:
250
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
251
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
252
+ input_data_format (`ChannelDimension` or `str`, *optional*):
253
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
254
+ from the input image. Can be one of:
255
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
256
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
257
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
258
+ """
259
+ do_resize = do_resize if do_resize is not None else self.do_resize
260
+ resample = resample if resample is not None else self.resample
261
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
262
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
263
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
264
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
265
+ image_mean = image_mean if image_mean is not None else self.image_mean
266
+ image_std = image_std if image_std is not None else self.image_std
267
+
268
+ size = size if size is not None else self.size
269
+ size = get_size_dict(size, default_to_square=False)
270
+ crop_size = crop_size if crop_size is not None else self.crop_size
271
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
272
+ images = make_list_of_images(images)
273
+
274
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
275
+
276
+ if not valid_images(images):
277
+ raise ValueError(
278
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
279
+ "torch.Tensor, tf.Tensor or jax.ndarray."
280
+ )
281
+ validate_preprocess_arguments(
282
+ do_rescale=do_rescale,
283
+ rescale_factor=rescale_factor,
284
+ do_normalize=do_normalize,
285
+ image_mean=image_mean,
286
+ image_std=image_std,
287
+ do_center_crop=do_center_crop,
288
+ crop_size=crop_size,
289
+ do_resize=do_resize,
290
+ size=size,
291
+ resample=resample,
292
+ )
293
+ # All transformations expect numpy arrays.
294
+ images = [to_numpy_array(image) for image in images]
295
+
296
+ if is_scaled_image(images[0]) and do_rescale:
297
+ logger.warning_once(
298
+ "It looks like you are trying to rescale already rescaled images. If the input"
299
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
300
+ )
301
+
302
+ if input_data_format is None:
303
+ # We assume that all images have the same channel dimension format.
304
+ input_data_format = infer_channel_dimension_format(images[0])
305
+
306
+ if do_resize:
307
+ images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]
308
+
309
+ if do_center_crop:
310
+ images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]
311
+
312
+ if do_rescale:
313
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
314
+
315
+ if do_normalize:
316
+ images = [
317
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
318
+ ]
319
+
320
+ images = [
321
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
322
+ ]
323
+
324
+ data = {"pixel_values": images}
325
+ return BatchFeature(data=data, tensor_type=return_tensors)
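A minimal usage sketch of the image processor added above (not part of the diff; it assumes the public `facebook/levit-128S` checkpoint and a hypothetical local image file `cat.png`):

```python
# Sketch only: exercising LevitImageProcessor.preprocess via __call__.
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
image = Image.open("cat.png").convert("RGB")  # hypothetical local file

# resize -> center crop -> rescale -> normalize, returned as a BatchFeature
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected (1, 3, 224, 224) with the default crop_size
```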
env-llmeval/lib/python3.10/site-packages/transformers/models/levit/modeling_levit.py ADDED
@@ -0,0 +1,739 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LeViT model."""
16
+
17
+ import itertools
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithNoAttention,
28
+ BaseModelOutputWithPoolingAndNoAttention,
29
+ ImageClassifierOutputWithNoAttention,
30
+ ModelOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
34
+ from .configuration_levit import LevitConfig
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ # General docstring
40
+ _CONFIG_FOR_DOC = "LevitConfig"
41
+
42
+ # Base docstring
43
+ _CHECKPOINT_FOR_DOC = "facebook/levit-128S"
44
+ _EXPECTED_OUTPUT_SHAPE = [1, 16, 384]
45
+
46
+ # Image classification docstring
47
+ _IMAGE_CLASS_CHECKPOINT = "facebook/levit-128S"
48
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
49
+
50
+ LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
51
+ "facebook/levit-128S",
52
+ # See all LeViT models at https://huggingface.co/models?filter=levit
53
+ ]
54
+
55
+
56
+ @dataclass
57
+ class LevitForImageClassificationWithTeacherOutput(ModelOutput):
58
+ """
59
+ Output type of [`LevitForImageClassificationWithTeacher`].
60
+
61
+ Args:
62
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
63
+ Prediction scores as the average of the `cls_logits` and `distillation_logits`.
64
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
65
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
66
+ class token).
67
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
68
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
69
+ distillation token).
70
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
71
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
72
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
73
+ plus the initial embedding outputs.
74
+ """
75
+
76
+ logits: torch.FloatTensor = None
77
+ cls_logits: torch.FloatTensor = None
78
+ distillation_logits: torch.FloatTensor = None
79
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
80
+
81
+
82
+ class LevitConvEmbeddings(nn.Module):
83
+ """
84
+ LeViT Conv Embeddings with Batch Norm, used in the initial patch embedding layer.
85
+ """
86
+
87
+ def __init__(
88
+ self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bn_weight_init=1
89
+ ):
90
+ super().__init__()
91
+ self.convolution = nn.Conv2d(
92
+ in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, groups=groups, bias=False
93
+ )
94
+ self.batch_norm = nn.BatchNorm2d(out_channels)
95
+
96
+ def forward(self, embeddings):
97
+ embeddings = self.convolution(embeddings)
98
+ embeddings = self.batch_norm(embeddings)
99
+ return embeddings
100
+
101
+
102
+ class LevitPatchEmbeddings(nn.Module):
103
+ """
104
+ LeViT patch embeddings, for final embeddings to be passed to transformer blocks. It consists of multiple
105
+ `LevitConvEmbeddings`.
106
+ """
107
+
108
+ def __init__(self, config):
109
+ super().__init__()
110
+ self.embedding_layer_1 = LevitConvEmbeddings(
111
+ config.num_channels, config.hidden_sizes[0] // 8, config.kernel_size, config.stride, config.padding
112
+ )
113
+ self.activation_layer_1 = nn.Hardswish()
114
+
115
+ self.embedding_layer_2 = LevitConvEmbeddings(
116
+ config.hidden_sizes[0] // 8, config.hidden_sizes[0] // 4, config.kernel_size, config.stride, config.padding
117
+ )
118
+ self.activation_layer_2 = nn.Hardswish()
119
+
120
+ self.embedding_layer_3 = LevitConvEmbeddings(
121
+ config.hidden_sizes[0] // 4, config.hidden_sizes[0] // 2, config.kernel_size, config.stride, config.padding
122
+ )
123
+ self.activation_layer_3 = nn.Hardswish()
124
+
125
+ self.embedding_layer_4 = LevitConvEmbeddings(
126
+ config.hidden_sizes[0] // 2, config.hidden_sizes[0], config.kernel_size, config.stride, config.padding
127
+ )
128
+ self.num_channels = config.num_channels
129
+
130
+ def forward(self, pixel_values):
131
+ num_channels = pixel_values.shape[1]
132
+ if num_channels != self.num_channels:
133
+ raise ValueError(
134
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
135
+ )
136
+ embeddings = self.embedding_layer_1(pixel_values)
137
+ embeddings = self.activation_layer_1(embeddings)
138
+ embeddings = self.embedding_layer_2(embeddings)
139
+ embeddings = self.activation_layer_2(embeddings)
140
+ embeddings = self.embedding_layer_3(embeddings)
141
+ embeddings = self.activation_layer_3(embeddings)
142
+ embeddings = self.embedding_layer_4(embeddings)
143
+ return embeddings.flatten(2).transpose(1, 2)
144
+
145
+
146
+ class MLPLayerWithBN(nn.Module):
147
+ def __init__(self, input_dim, output_dim, bn_weight_init=1):
148
+ super().__init__()
149
+ self.linear = nn.Linear(in_features=input_dim, out_features=output_dim, bias=False)
150
+ self.batch_norm = nn.BatchNorm1d(output_dim)
151
+
152
+ def forward(self, hidden_state):
153
+ hidden_state = self.linear(hidden_state)
154
+ hidden_state = self.batch_norm(hidden_state.flatten(0, 1)).reshape_as(hidden_state)
155
+ return hidden_state
156
+
157
+
158
+ class LevitSubsample(nn.Module):
159
+ def __init__(self, stride, resolution):
160
+ super().__init__()
161
+ self.stride = stride
162
+ self.resolution = resolution
163
+
164
+ def forward(self, hidden_state):
165
+ batch_size, _, channels = hidden_state.shape
166
+ hidden_state = hidden_state.view(batch_size, self.resolution, self.resolution, channels)[
167
+ :, :: self.stride, :: self.stride
168
+ ].reshape(batch_size, -1, channels)
169
+ return hidden_state
170
+
171
+
172
+ class LevitAttention(nn.Module):
173
+ def __init__(self, hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution):
174
+ super().__init__()
175
+ self.num_attention_heads = num_attention_heads
176
+ self.scale = key_dim**-0.5
177
+ self.key_dim = key_dim
178
+ self.attention_ratio = attention_ratio
179
+ self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads * 2
180
+ self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
181
+
182
+ self.queries_keys_values = MLPLayerWithBN(hidden_sizes, self.out_dim_keys_values)
183
+ self.activation = nn.Hardswish()
184
+ self.projection = MLPLayerWithBN(self.out_dim_projection, hidden_sizes, bn_weight_init=0)
185
+
186
+ points = list(itertools.product(range(resolution), range(resolution)))
187
+ len_points = len(points)
188
+ attention_offsets, indices = {}, []
189
+ for p1 in points:
190
+ for p2 in points:
191
+ offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
192
+ if offset not in attention_offsets:
193
+ attention_offsets[offset] = len(attention_offsets)
194
+ indices.append(attention_offsets[offset])
195
+
196
+ self.attention_bias_cache = {}
197
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
198
+ self.register_buffer(
199
+ "attention_bias_idxs", torch.LongTensor(indices).view(len_points, len_points), persistent=False
200
+ )
201
+
202
+ @torch.no_grad()
203
+ def train(self, mode=True):
204
+ super().train(mode)
205
+ if mode and self.attention_bias_cache:
206
+ self.attention_bias_cache = {} # clear ab cache
207
+
208
+ def get_attention_biases(self, device):
209
+ if self.training:
210
+ return self.attention_biases[:, self.attention_bias_idxs]
211
+ else:
212
+ device_key = str(device)
213
+ if device_key not in self.attention_bias_cache:
214
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
215
+ return self.attention_bias_cache[device_key]
216
+
217
+ def forward(self, hidden_state):
218
+ batch_size, seq_length, _ = hidden_state.shape
219
+ queries_keys_values = self.queries_keys_values(hidden_state)
220
+ query, key, value = queries_keys_values.view(batch_size, seq_length, self.num_attention_heads, -1).split(
221
+ [self.key_dim, self.key_dim, self.attention_ratio * self.key_dim], dim=3
222
+ )
223
+ query = query.permute(0, 2, 1, 3)
224
+ key = key.permute(0, 2, 1, 3)
225
+ value = value.permute(0, 2, 1, 3)
226
+
227
+ attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
228
+ attention = attention.softmax(dim=-1)
229
+ hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, seq_length, self.out_dim_projection)
230
+ hidden_state = self.projection(self.activation(hidden_state))
231
+ return hidden_state
232
+
233
+
234
+ class LevitAttentionSubsample(nn.Module):
235
+ def __init__(
236
+ self,
237
+ input_dim,
238
+ output_dim,
239
+ key_dim,
240
+ num_attention_heads,
241
+ attention_ratio,
242
+ stride,
243
+ resolution_in,
244
+ resolution_out,
245
+ ):
246
+ super().__init__()
247
+ self.num_attention_heads = num_attention_heads
248
+ self.scale = key_dim**-0.5
249
+ self.key_dim = key_dim
250
+ self.attention_ratio = attention_ratio
251
+ self.out_dim_keys_values = attention_ratio * key_dim * num_attention_heads + key_dim * num_attention_heads
252
+ self.out_dim_projection = attention_ratio * key_dim * num_attention_heads
253
+ self.resolution_out = resolution_out
254
+ # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
255
+ self.keys_values = MLPLayerWithBN(input_dim, self.out_dim_keys_values)
256
+ self.queries_subsample = LevitSubsample(stride, resolution_in)
257
+ self.queries = MLPLayerWithBN(input_dim, key_dim * num_attention_heads)
258
+ self.activation = nn.Hardswish()
259
+ self.projection = MLPLayerWithBN(self.out_dim_projection, output_dim)
260
+
261
+ self.attention_bias_cache = {}
262
+
263
+ points = list(itertools.product(range(resolution_in), range(resolution_in)))
264
+ points_ = list(itertools.product(range(resolution_out), range(resolution_out)))
265
+ len_points, len_points_ = len(points), len(points_)
266
+ attention_offsets, indices = {}, []
267
+ for p1 in points_:
268
+ for p2 in points:
269
+ size = 1
270
+ offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), abs(p1[1] * stride - p2[1] + (size - 1) / 2))
271
+ if offset not in attention_offsets:
272
+ attention_offsets[offset] = len(attention_offsets)
273
+ indices.append(attention_offsets[offset])
274
+
275
+ self.attention_biases = torch.nn.Parameter(torch.zeros(num_attention_heads, len(attention_offsets)))
276
+ self.register_buffer(
277
+ "attention_bias_idxs", torch.LongTensor(indices).view(len_points_, len_points), persistent=False
278
+ )
279
+
280
+ @torch.no_grad()
281
+ def train(self, mode=True):
282
+ super().train(mode)
283
+ if mode and self.attention_bias_cache:
284
+ self.attention_bias_cache = {} # clear ab cache
285
+
286
+ def get_attention_biases(self, device):
287
+ if self.training:
288
+ return self.attention_biases[:, self.attention_bias_idxs]
289
+ else:
290
+ device_key = str(device)
291
+ if device_key not in self.attention_bias_cache:
292
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
293
+ return self.attention_bias_cache[device_key]
294
+
295
+ def forward(self, hidden_state):
296
+ batch_size, seq_length, _ = hidden_state.shape
297
+ key, value = (
298
+ self.keys_values(hidden_state)
299
+ .view(batch_size, seq_length, self.num_attention_heads, -1)
300
+ .split([self.key_dim, self.attention_ratio * self.key_dim], dim=3)
301
+ )
302
+ key = key.permute(0, 2, 1, 3)
303
+ value = value.permute(0, 2, 1, 3)
304
+
305
+ query = self.queries(self.queries_subsample(hidden_state))
306
+ query = query.view(batch_size, self.resolution_out**2, self.num_attention_heads, self.key_dim).permute(
307
+ 0, 2, 1, 3
308
+ )
309
+
310
+ attention = query @ key.transpose(-2, -1) * self.scale + self.get_attention_biases(hidden_state.device)
311
+ attention = attention.softmax(dim=-1)
312
+ hidden_state = (attention @ value).transpose(1, 2).reshape(batch_size, -1, self.out_dim_projection)
313
+ hidden_state = self.projection(self.activation(hidden_state))
314
+ return hidden_state
315
+
316
+
317
+ class LevitMLPLayer(nn.Module):
318
+ """
319
+ MLP Layer with `2X` expansion in contrast to ViT with `4X`.
320
+ """
321
+
322
+ def __init__(self, input_dim, hidden_dim):
323
+ super().__init__()
324
+ self.linear_up = MLPLayerWithBN(input_dim, hidden_dim)
325
+ self.activation = nn.Hardswish()
326
+ self.linear_down = MLPLayerWithBN(hidden_dim, input_dim)
327
+
328
+ def forward(self, hidden_state):
329
+ hidden_state = self.linear_up(hidden_state)
330
+ hidden_state = self.activation(hidden_state)
331
+ hidden_state = self.linear_down(hidden_state)
332
+ return hidden_state
333
+
334
+
335
+ class LevitResidualLayer(nn.Module):
336
+ """
337
+ Residual Block for LeViT
338
+ """
339
+
340
+ def __init__(self, module, drop_rate):
341
+ super().__init__()
342
+ self.module = module
343
+ self.drop_rate = drop_rate
344
+
345
+ def forward(self, hidden_state):
346
+ if self.training and self.drop_rate > 0:
347
+ rnd = torch.rand(hidden_state.size(0), 1, 1, device=hidden_state.device)
348
+ rnd = rnd.ge_(self.drop_rate).div(1 - self.drop_rate).detach()
349
+ hidden_state = hidden_state + self.module(hidden_state) * rnd
350
+ return hidden_state
351
+ else:
352
+ hidden_state = hidden_state + self.module(hidden_state)
353
+ return hidden_state
354
+
355
+
356
+ class LevitStage(nn.Module):
357
+ """
358
+ LeViT Stage consisting of `LevitMLPLayer` and `LevitAttention` layers.
359
+ """
360
+
361
+ def __init__(
362
+ self,
363
+ config,
364
+ idx,
365
+ hidden_sizes,
366
+ key_dim,
367
+ depths,
368
+ num_attention_heads,
369
+ attention_ratio,
370
+ mlp_ratio,
371
+ down_ops,
372
+ resolution_in,
373
+ ):
374
+ super().__init__()
375
+ self.layers = []
376
+ self.config = config
377
+ self.resolution_in = resolution_in
378
+ # resolution_in is the initial resolution, resolution_out is the final resolution after downsampling
379
+ for _ in range(depths):
380
+ self.layers.append(
381
+ LevitResidualLayer(
382
+ LevitAttention(hidden_sizes, key_dim, num_attention_heads, attention_ratio, resolution_in),
383
+ self.config.drop_path_rate,
384
+ )
385
+ )
386
+ if mlp_ratio > 0:
387
+ hidden_dim = hidden_sizes * mlp_ratio
388
+ self.layers.append(
389
+ LevitResidualLayer(LevitMLPLayer(hidden_sizes, hidden_dim), self.config.drop_path_rate)
390
+ )
391
+
392
+ if down_ops[0] == "Subsample":
393
+ self.resolution_out = (self.resolution_in - 1) // down_ops[5] + 1
394
+ self.layers.append(
395
+ LevitAttentionSubsample(
396
+ *self.config.hidden_sizes[idx : idx + 2],
397
+ key_dim=down_ops[1],
398
+ num_attention_heads=down_ops[2],
399
+ attention_ratio=down_ops[3],
400
+ stride=down_ops[5],
401
+ resolution_in=resolution_in,
402
+ resolution_out=self.resolution_out,
403
+ )
404
+ )
405
+ self.resolution_in = self.resolution_out
406
+ if down_ops[4] > 0:
407
+ hidden_dim = self.config.hidden_sizes[idx + 1] * down_ops[4]
408
+ self.layers.append(
409
+ LevitResidualLayer(
410
+ LevitMLPLayer(self.config.hidden_sizes[idx + 1], hidden_dim), self.config.drop_path_rate
411
+ )
412
+ )
413
+
414
+ self.layers = nn.ModuleList(self.layers)
415
+
416
+ def get_resolution(self):
417
+ return self.resolution_in
418
+
419
+ def forward(self, hidden_state):
420
+ for layer in self.layers:
421
+ hidden_state = layer(hidden_state)
422
+ return hidden_state
423
+
424
+
425
+ class LevitEncoder(nn.Module):
426
+ """
427
+ LeViT Encoder consisting of multiple `LevitStage` stages.
428
+ """
429
+
430
+ def __init__(self, config):
431
+ super().__init__()
432
+ self.config = config
433
+ resolution = self.config.image_size // self.config.patch_size
434
+ self.stages = []
435
+ self.config.down_ops.append([""])
436
+
437
+ for stage_idx in range(len(config.depths)):
438
+ stage = LevitStage(
439
+ config,
440
+ stage_idx,
441
+ config.hidden_sizes[stage_idx],
442
+ config.key_dim[stage_idx],
443
+ config.depths[stage_idx],
444
+ config.num_attention_heads[stage_idx],
445
+ config.attention_ratio[stage_idx],
446
+ config.mlp_ratio[stage_idx],
447
+ config.down_ops[stage_idx],
448
+ resolution,
449
+ )
450
+ resolution = stage.get_resolution()
451
+ self.stages.append(stage)
452
+
453
+ self.stages = nn.ModuleList(self.stages)
454
+
455
+ def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
456
+ all_hidden_states = () if output_hidden_states else None
457
+
458
+ for stage in self.stages:
459
+ if output_hidden_states:
460
+ all_hidden_states = all_hidden_states + (hidden_state,)
461
+ hidden_state = stage(hidden_state)
462
+
463
+ if output_hidden_states:
464
+ all_hidden_states = all_hidden_states + (hidden_state,)
465
+ if not return_dict:
466
+ return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)
467
+
468
+ return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
469
+
470
+
471
+ class LevitClassificationLayer(nn.Module):
472
+ """
473
+ LeViT Classification Layer
474
+ """
475
+
476
+ def __init__(self, input_dim, output_dim):
477
+ super().__init__()
478
+ self.batch_norm = nn.BatchNorm1d(input_dim)
479
+ self.linear = nn.Linear(input_dim, output_dim)
480
+
481
+ def forward(self, hidden_state):
482
+ hidden_state = self.batch_norm(hidden_state)
483
+ logits = self.linear(hidden_state)
484
+ return logits
485
+
486
+
487
+ class LevitPreTrainedModel(PreTrainedModel):
488
+ """
489
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
490
+ models.
491
+ """
492
+
493
+ config_class = LevitConfig
494
+ base_model_prefix = "levit"
495
+ main_input_name = "pixel_values"
496
+
497
+ def _init_weights(self, module):
498
+ """Initialize the weights"""
499
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
500
+ # Slightly different from the TF version which uses truncated_normal for initialization
501
+ # cf https://github.com/pytorch/pytorch/pull/5617
502
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
503
+ if module.bias is not None:
504
+ module.bias.data.zero_()
505
+ elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
506
+ module.bias.data.zero_()
507
+ module.weight.data.fill_(1.0)
508
+
509
+
510
+ LEVIT_START_DOCSTRING = r"""
511
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
512
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
513
+ behavior.
514
+
515
+ Parameters:
516
+ config ([`LevitConfig`]): Model configuration class with all the parameters of the model.
517
+ Initializing with a config file does not load the weights associated with the model, only the
518
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
519
+ """
520
+
521
+ LEVIT_INPUTS_DOCSTRING = r"""
522
+ Args:
523
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
524
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
525
+ [`LevitImageProcessor.__call__`] for details.
526
+
527
+ output_hidden_states (`bool`, *optional*):
528
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
529
+ more detail.
530
+ return_dict (`bool`, *optional*):
531
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
532
+ """
533
+
534
+
535
+ @add_start_docstrings(
536
+ "The bare Levit model outputting raw features without any specific head on top.",
537
+ LEVIT_START_DOCSTRING,
538
+ )
539
+ class LevitModel(LevitPreTrainedModel):
540
+ def __init__(self, config):
541
+ super().__init__(config)
542
+ self.config = config
543
+ self.patch_embeddings = LevitPatchEmbeddings(config)
544
+ self.encoder = LevitEncoder(config)
545
+ # Initialize weights and apply final processing
546
+ self.post_init()
547
+
548
+ @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
549
+ @add_code_sample_docstrings(
550
+ checkpoint=_CHECKPOINT_FOR_DOC,
551
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
552
+ config_class=_CONFIG_FOR_DOC,
553
+ modality="vision",
554
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
555
+ )
556
+ def forward(
557
+ self,
558
+ pixel_values: torch.FloatTensor = None,
559
+ output_hidden_states: Optional[bool] = None,
560
+ return_dict: Optional[bool] = None,
561
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
562
+ output_hidden_states = (
563
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
564
+ )
565
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
566
+
567
+ if pixel_values is None:
568
+ raise ValueError("You have to specify pixel_values")
569
+
570
+ embeddings = self.patch_embeddings(pixel_values)
571
+ encoder_outputs = self.encoder(
572
+ embeddings,
573
+ output_hidden_states=output_hidden_states,
574
+ return_dict=return_dict,
575
+ )
576
+
577
+ last_hidden_state = encoder_outputs[0]
578
+
579
+ # global average pooling, (batch_size, seq_length, hidden_sizes) -> (batch_size, hidden_sizes)
580
+ pooled_output = last_hidden_state.mean(dim=1)
581
+
582
+ if not return_dict:
583
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
584
+
585
+ return BaseModelOutputWithPoolingAndNoAttention(
586
+ last_hidden_state=last_hidden_state,
587
+ pooler_output=pooled_output,
588
+ hidden_states=encoder_outputs.hidden_states,
589
+ )
590
+
591
+
592
+ @add_start_docstrings(
593
+ """
594
+ Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
595
+ ImageNet.
596
+ """,
597
+ LEVIT_START_DOCSTRING,
598
+ )
599
+ class LevitForImageClassification(LevitPreTrainedModel):
600
+ def __init__(self, config):
601
+ super().__init__(config)
602
+ self.config = config
603
+ self.num_labels = config.num_labels
604
+ self.levit = LevitModel(config)
605
+
606
+ # Classifier head
607
+ self.classifier = (
608
+ LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
609
+ if config.num_labels > 0
610
+ else torch.nn.Identity()
611
+ )
612
+
613
+ # Initialize weights and apply final processing
614
+ self.post_init()
615
+
616
+ @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
617
+ @add_code_sample_docstrings(
618
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
619
+ output_type=ImageClassifierOutputWithNoAttention,
620
+ config_class=_CONFIG_FOR_DOC,
621
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
622
+ )
623
+ def forward(
624
+ self,
625
+ pixel_values: torch.FloatTensor = None,
626
+ labels: Optional[torch.LongTensor] = None,
627
+ output_hidden_states: Optional[bool] = None,
628
+ return_dict: Optional[bool] = None,
629
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
630
+ r"""
631
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
632
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
633
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
634
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
635
+ """
636
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
637
+
638
+ outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
639
+
640
+ sequence_output = outputs[0]
641
+ sequence_output = sequence_output.mean(1)
642
+ logits = self.classifier(sequence_output)
643
+
644
+ loss = None
645
+ if labels is not None:
646
+ if self.config.problem_type is None:
647
+ if self.num_labels == 1:
648
+ self.config.problem_type = "regression"
649
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
650
+ self.config.problem_type = "single_label_classification"
651
+ else:
652
+ self.config.problem_type = "multi_label_classification"
653
+
654
+ if self.config.problem_type == "regression":
655
+ loss_fct = MSELoss()
656
+ if self.num_labels == 1:
657
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
658
+ else:
659
+ loss = loss_fct(logits, labels)
660
+ elif self.config.problem_type == "single_label_classification":
661
+ loss_fct = CrossEntropyLoss()
662
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
663
+ elif self.config.problem_type == "multi_label_classification":
664
+ loss_fct = BCEWithLogitsLoss()
665
+ loss = loss_fct(logits, labels)
666
+ if not return_dict:
667
+ output = (logits,) + outputs[2:]
668
+ return ((loss,) + output) if loss is not None else output
669
+
670
+ return ImageClassifierOutputWithNoAttention(
671
+ loss=loss,
672
+ logits=logits,
673
+ hidden_states=outputs.hidden_states,
674
+ )
675
+
676
+
677
+ @add_start_docstrings(
678
+ """
679
+ LeViT Model transformer with image classification heads on top (a linear layer on top of the final hidden state and
680
+ a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet. .. warning::
681
+ This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet
682
+ supported.
683
+ """,
684
+ LEVIT_START_DOCSTRING,
685
+ )
686
+ class LevitForImageClassificationWithTeacher(LevitPreTrainedModel):
687
+ def __init__(self, config):
688
+ super().__init__(config)
689
+ self.config = config
690
+ self.num_labels = config.num_labels
691
+ self.levit = LevitModel(config)
692
+
693
+ # Classifier head
694
+ self.classifier = (
695
+ LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
696
+ if config.num_labels > 0
697
+ else torch.nn.Identity()
698
+ )
699
+ self.classifier_distill = (
700
+ LevitClassificationLayer(config.hidden_sizes[-1], config.num_labels)
701
+ if config.num_labels > 0
702
+ else torch.nn.Identity()
703
+ )
704
+
705
+ # Initialize weights and apply final processing
706
+ self.post_init()
707
+
708
+ @add_start_docstrings_to_model_forward(LEVIT_INPUTS_DOCSTRING)
709
+ @add_code_sample_docstrings(
710
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
711
+ output_type=LevitForImageClassificationWithTeacherOutput,
712
+ config_class=_CONFIG_FOR_DOC,
713
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
714
+ )
715
+ def forward(
716
+ self,
717
+ pixel_values: torch.FloatTensor = None,
718
+ output_hidden_states: Optional[bool] = None,
719
+ return_dict: Optional[bool] = None,
720
+ ) -> Union[Tuple, LevitForImageClassificationWithTeacherOutput]:
721
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
722
+
723
+ outputs = self.levit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
724
+
725
+ sequence_output = outputs[0]
726
+ sequence_output = sequence_output.mean(1)
727
+ cls_logits, distill_logits = self.classifier(sequence_output), self.classifier_distill(sequence_output)
728
+ logits = (cls_logits + distill_logits) / 2
729
+
730
+ if not return_dict:
731
+ output = (logits, cls_logits, distill_logits) + outputs[2:]
732
+ return output
733
+
734
+ return LevitForImageClassificationWithTeacherOutput(
735
+ logits=logits,
736
+ cls_logits=cls_logits,
737
+ distillation_logits=distill_logits,
738
+ hidden_states=outputs.hidden_states,
739
+ )
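An end-to-end inference sketch for the model classes above (not part of the diff; it assumes the `facebook/levit-128S` checkpoint can be downloaded and a hypothetical local image `cat.png`):

```python
# Sketch only: classify one image with LevitForImageClassification.
import torch
from PIL import Image
from transformers import LevitForImageClassification, LevitImageProcessor

processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitForImageClassification.from_pretrained("facebook/levit-128S")
model.eval()

image = Image.open("cat.png").convert("RGB")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])  # e.g. "tabby, tabby cat"
```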
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py ADDED
@@ -0,0 +1,110 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_tf_available,
20
+ is_torch_available,
21
+ is_vision_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
27
+ }
28
+
29
+ try:
30
+ if not is_vision_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
36
+ _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
37
+
38
+ try:
39
+ if not is_torch_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_mobilevit"] = [
45
+ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
46
+ "MobileViTForImageClassification",
47
+ "MobileViTForSemanticSegmentation",
48
+ "MobileViTModel",
49
+ "MobileViTPreTrainedModel",
50
+ ]
51
+
52
+ try:
53
+ if not is_tf_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_tf_mobilevit"] = [
59
+ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
60
+ "TFMobileViTForImageClassification",
61
+ "TFMobileViTForSemanticSegmentation",
62
+ "TFMobileViTModel",
63
+ "TFMobileViTPreTrainedModel",
64
+ ]
65
+
66
+ if TYPE_CHECKING:
67
+ from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
68
+
69
+ try:
70
+ if not is_vision_available():
71
+ raise OptionalDependencyNotAvailable()
72
+ except OptionalDependencyNotAvailable:
73
+ pass
74
+ else:
75
+ from .feature_extraction_mobilevit import MobileViTFeatureExtractor
76
+ from .image_processing_mobilevit import MobileViTImageProcessor
77
+
78
+ try:
79
+ if not is_torch_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .modeling_mobilevit import (
85
+ MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
86
+ MobileViTForImageClassification,
87
+ MobileViTForSemanticSegmentation,
88
+ MobileViTModel,
89
+ MobileViTPreTrainedModel,
90
+ )
91
+
92
+ try:
93
+ if not is_tf_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_tf_mobilevit import (
99
+ TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ TFMobileViTForImageClassification,
101
+ TFMobileViTForSemanticSegmentation,
102
+ TFMobileViTModel,
103
+ TFMobileViTPreTrainedModel,
104
+ )
105
+
106
+
107
+ else:
108
+ import sys
109
+
110
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
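For context, the `_LazyModule` registration above defers the heavy framework imports until an attribute is actually accessed; a sketch (not part of the diff, torch assumed installed for the model class):

```python
# Sketch only: attribute access on the lazy module triggers the real import.
from transformers.models import mobilevit

config_cls = mobilevit.MobileViTConfig   # imports configuration_mobilevit on demand
print(config_cls.model_type)             # "mobilevit"

model_cls = mobilevit.MobileViTModel     # imports modeling_mobilevit (guarded on torch above)
print(model_cls.__module__)              # "transformers.models.mobilevit.modeling_mobilevit"
```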
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc ADDED
Binary file (8.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc ADDED
Binary file (40.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py ADDED
@@ -0,0 +1,185 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ MobileViT model configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import Mapping
19
+
20
+ from packaging import version
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ...onnx import OnnxConfig
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
30
+ "apple/mobilevit-small": "https://huggingface.co/apple/mobilevit-small/resolve/main/config.json",
31
+ "apple/mobilevit-x-small": "https://huggingface.co/apple/mobilevit-x-small/resolve/main/config.json",
32
+ "apple/mobilevit-xx-small": "https://huggingface.co/apple/mobilevit-xx-small/resolve/main/config.json",
33
+ "apple/deeplabv3-mobilevit-small": (
34
+ "https://huggingface.co/apple/deeplabv3-mobilevit-small/resolve/main/config.json"
35
+ ),
36
+ "apple/deeplabv3-mobilevit-x-small": (
37
+ "https://huggingface.co/apple/deeplabv3-mobilevit-x-small/resolve/main/config.json"
38
+ ),
39
+ "apple/deeplabv3-mobilevit-xx-small": (
40
+ "https://huggingface.co/apple/deeplabv3-mobilevit-xx-small/resolve/main/config.json"
41
+ ),
42
+ # See all MobileViT models at https://huggingface.co/models?filter=mobilevit
43
+ }
44
+
45
+
46
+ class MobileViTConfig(PretrainedConfig):
47
+ r"""
48
+ This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
49
+ MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
50
+ configuration with the defaults will yield a similar configuration to that of the MobileViT
51
+ [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
52
+
53
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
54
+ documentation from [`PretrainedConfig`] for more information.
55
+
56
+ Args:
57
+ num_channels (`int`, *optional*, defaults to 3):
58
+ The number of input channels.
59
+ image_size (`int`, *optional*, defaults to 256):
60
+ The size (resolution) of each image.
61
+ patch_size (`int`, *optional*, defaults to 2):
62
+ The size (resolution) of each patch.
63
+ hidden_sizes (`List[int]`, *optional*, defaults to `[144, 192, 240]`):
64
+ Dimensionality (hidden size) of the Transformer encoders at each stage.
65
+ neck_hidden_sizes (`List[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
66
+ The number of channels for the feature maps of the backbone.
67
+ num_attention_heads (`int`, *optional*, defaults to 4):
68
+ Number of attention heads for each attention layer in the Transformer encoder.
69
+ mlp_ratio (`float`, *optional*, defaults to 2.0):
70
+ The ratio of the number of channels in the output of the MLP to the number of channels in the input.
71
+ expand_ratio (`float`, *optional*, defaults to 4.0):
72
+ Expansion factor for the MobileNetv2 layers.
73
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
74
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
75
+ conv_kernel_size (`int`, *optional*, defaults to 3):
76
+ The size of the convolutional kernel in the MobileViT layer.
77
+ output_stride (`int`, *optional*, defaults to 32):
78
+ The ratio of the spatial resolution of the output to the resolution of the input image.
79
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
80
+ The dropout probability for all fully connected layers in the Transformer encoder.
81
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
82
+ The dropout ratio for the attention probabilities.
83
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
84
+ The dropout ratio for attached classifiers.
85
+ initializer_range (`float`, *optional*, defaults to 0.02):
86
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
87
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
88
+ The epsilon used by the layer normalization layers.
89
+ qkv_bias (`bool`, *optional*, defaults to `True`):
90
+ Whether to add a bias to the queries, keys and values.
91
+ aspp_out_channels (`int`, *optional*, defaults to 256):
92
+ Number of output channels used in the ASPP layer for semantic segmentation.
93
+ atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`):
94
+ Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
95
+ aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
96
+ The dropout ratio for the ASPP layer for semantic segmentation.
97
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
98
+ The index that is ignored by the loss function of the semantic segmentation model.
99
+
100
+ Example:
101
+
102
+ ```python
103
+ >>> from transformers import MobileViTConfig, MobileViTModel
104
+
105
+ >>> # Initializing a mobilevit-small style configuration
106
+ >>> configuration = MobileViTConfig()
107
+
108
+ >>> # Initializing a model from the mobilevit-small style configuration
109
+ >>> model = MobileViTModel(configuration)
110
+
111
+ >>> # Accessing the model configuration
112
+ >>> configuration = model.config
113
+ ```"""
114
+
115
+ model_type = "mobilevit"
116
+
117
+ def __init__(
118
+ self,
119
+ num_channels=3,
120
+ image_size=256,
121
+ patch_size=2,
122
+ hidden_sizes=[144, 192, 240],
123
+ neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
124
+ num_attention_heads=4,
125
+ mlp_ratio=2.0,
126
+ expand_ratio=4.0,
127
+ hidden_act="silu",
128
+ conv_kernel_size=3,
129
+ output_stride=32,
130
+ hidden_dropout_prob=0.1,
131
+ attention_probs_dropout_prob=0.0,
132
+ classifier_dropout_prob=0.1,
133
+ initializer_range=0.02,
134
+ layer_norm_eps=1e-5,
135
+ qkv_bias=True,
136
+ aspp_out_channels=256,
137
+ atrous_rates=[6, 12, 18],
138
+ aspp_dropout_prob=0.1,
139
+ semantic_loss_ignore_index=255,
140
+ **kwargs,
141
+ ):
142
+ super().__init__(**kwargs)
143
+
144
+ self.num_channels = num_channels
145
+ self.image_size = image_size
146
+ self.patch_size = patch_size
147
+ self.hidden_sizes = hidden_sizes
148
+ self.neck_hidden_sizes = neck_hidden_sizes
149
+ self.num_attention_heads = num_attention_heads
150
+ self.mlp_ratio = mlp_ratio
151
+ self.expand_ratio = expand_ratio
152
+ self.hidden_act = hidden_act
153
+ self.conv_kernel_size = conv_kernel_size
154
+ self.output_stride = output_stride
155
+ self.hidden_dropout_prob = hidden_dropout_prob
156
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
157
+ self.classifier_dropout_prob = classifier_dropout_prob
158
+ self.initializer_range = initializer_range
159
+ self.layer_norm_eps = layer_norm_eps
160
+ self.qkv_bias = qkv_bias
161
+
162
+ # decode head attributes for semantic segmentation
163
+ self.aspp_out_channels = aspp_out_channels
164
+ self.atrous_rates = atrous_rates
165
+ self.aspp_dropout_prob = aspp_dropout_prob
166
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
167
+
168
+
169
+ class MobileViTOnnxConfig(OnnxConfig):
170
+ torch_onnx_minimum_version = version.parse("1.11")
171
+
172
+ @property
173
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
174
+ return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
175
+
176
+ @property
177
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
178
+ if self.task == "image-classification":
179
+ return OrderedDict([("logits", {0: "batch"})])
180
+ else:
181
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
182
+
183
+ @property
184
+ def atol_for_validation(self) -> float:
185
+ return 1e-4
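A small sketch (not part of the diff) showing how the configuration defaults and the ONNX export metadata defined above could be inspected:

```python
# Sketch only: inspecting MobileViTConfig defaults and MobileViTOnnxConfig metadata.
from transformers import MobileViTConfig
from transformers.models.mobilevit import MobileViTOnnxConfig

config = MobileViTConfig()  # apple/mobilevit-small style defaults
onnx_config = MobileViTOnnxConfig(config, task="image-classification")

print(config.hidden_sizes)              # [144, 192, 240]
print(dict(onnx_config.inputs))         # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
print(dict(onnx_config.outputs))        # {"logits": {0: "batch"}}
print(onnx_config.atol_for_validation)  # 0.0001
```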
env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py ADDED
@@ -0,0 +1,312 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert MobileViT checkpoints from the ml-cvnets library."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import requests
23
+ import torch
24
+ from huggingface_hub import hf_hub_download
25
+ from PIL import Image
26
+
27
+ from transformers import (
28
+ MobileViTConfig,
29
+ MobileViTForImageClassification,
30
+ MobileViTForSemanticSegmentation,
31
+ MobileViTImageProcessor,
32
+ )
33
+ from transformers.utils import logging
34
+
35
+
36
+ logging.set_verbosity_info()
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ def get_mobilevit_config(mobilevit_name):
41
+ config = MobileViTConfig()
42
+
43
+ # size of the architecture
44
+ if "mobilevit_s" in mobilevit_name:
45
+ config.hidden_sizes = [144, 192, 240]
46
+ config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
47
+ elif "mobilevit_xs" in mobilevit_name:
48
+ config.hidden_sizes = [96, 120, 144]
49
+ config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
50
+ elif "mobilevit_xxs" in mobilevit_name:
51
+ config.hidden_sizes = [64, 80, 96]
52
+ config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
53
+ config.hidden_dropout_prob = 0.05
54
+ config.expand_ratio = 2.0
55
+
56
+ if mobilevit_name.startswith("deeplabv3_"):
57
+ config.image_size = 512
58
+ config.output_stride = 16
59
+ config.num_labels = 21
60
+ filename = "pascal-voc-id2label.json"
61
+ else:
62
+ config.num_labels = 1000
63
+ filename = "imagenet-1k-id2label.json"
64
+
65
+ repo_id = "huggingface/label-files"
66
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
67
+ id2label = {int(k): v for k, v in id2label.items()}
68
+ config.id2label = id2label
69
+ config.label2id = {v: k for k, v in id2label.items()}
70
+
71
+ return config
72
+
73
+
74
+ def rename_key(name, base_model=False):
75
+ for i in range(1, 6):
76
+ if f"layer_{i}." in name:
77
+ name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
78
+
79
+ if "conv_1." in name:
80
+ name = name.replace("conv_1.", "conv_stem.")
81
+ if ".block." in name:
82
+ name = name.replace(".block.", ".")
83
+
84
+ if "exp_1x1" in name:
85
+ name = name.replace("exp_1x1", "expand_1x1")
86
+ if "red_1x1" in name:
87
+ name = name.replace("red_1x1", "reduce_1x1")
88
+ if ".local_rep.conv_3x3." in name:
89
+ name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
90
+ if ".local_rep.conv_1x1." in name:
91
+ name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
92
+ if ".norm." in name:
93
+ name = name.replace(".norm.", ".normalization.")
94
+ if ".conv." in name:
95
+ name = name.replace(".conv.", ".convolution.")
96
+ if ".conv_proj." in name:
97
+ name = name.replace(".conv_proj.", ".conv_projection.")
98
+
99
+ for i in range(0, 2):
100
+ for j in range(0, 4):
101
+ if f".{i}.{j}." in name:
102
+ name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
103
+
104
+ for i in range(2, 6):
105
+ for j in range(0, 4):
106
+ if f".{i}.{j}." in name:
107
+ name = name.replace(f".{i}.{j}.", f".{i}.")
108
+ if "expand_1x1" in name:
109
+ name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
110
+ if "conv_3x3" in name:
111
+ name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
112
+ if "reduce_1x1" in name:
113
+ name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
114
+
115
+ for i in range(2, 5):
116
+ if f".global_rep.{i}.weight" in name:
117
+ name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
118
+ if f".global_rep.{i}.bias" in name:
119
+ name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
120
+
121
+ if ".global_rep." in name:
122
+ name = name.replace(".global_rep.", ".transformer.")
123
+ if ".pre_norm_mha.0." in name:
124
+ name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
125
+ if ".pre_norm_mha.1.out_proj." in name:
126
+ name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
127
+ if ".pre_norm_ffn.0." in name:
128
+ name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
129
+ if ".pre_norm_ffn.1." in name:
130
+ name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
131
+ if ".pre_norm_ffn.4." in name:
132
+ name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
133
+ if ".transformer." in name:
134
+ name = name.replace(".transformer.", ".transformer.layer.")
135
+
136
+ if ".aspp_layer." in name:
137
+ name = name.replace(".aspp_layer.", ".")
138
+ if ".aspp_pool." in name:
139
+ name = name.replace(".aspp_pool.", ".")
140
+ if "seg_head." in name:
141
+ name = name.replace("seg_head.", "segmentation_head.")
142
+ if "segmentation_head.classifier.classifier." in name:
143
+ name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
144
+
145
+ if "classifier.fc." in name:
146
+ name = name.replace("classifier.fc.", "classifier.")
147
+ elif (not base_model) and ("segmentation_head." not in name):
148
+ name = "mobilevit." + name
149
+
150
+ return name
151
+
152
+
153
+ def convert_state_dict(orig_state_dict, model, base_model=False):
154
+ if base_model:
155
+ model_prefix = ""
156
+ else:
157
+ model_prefix = "mobilevit."
158
+
159
+ for key in orig_state_dict.copy().keys():
160
+ val = orig_state_dict.pop(key)
161
+
162
+ if key[:8] == "encoder.":
163
+ key = key[8:]
164
+
165
+ if "qkv" in key:
166
+ key_split = key.split(".")
167
+ layer_num = int(key_split[0][6:]) - 1
168
+ transformer_num = int(key_split[3])
169
+ layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
170
+ dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
171
+ prefix = (
172
+ f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
173
+ )
174
+ if "weight" in key:
175
+ orig_state_dict[prefix + "query.weight"] = val[:dim, :]
176
+ orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
177
+ orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
178
+ else:
179
+ orig_state_dict[prefix + "query.bias"] = val[:dim]
180
+ orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
181
+ orig_state_dict[prefix + "value.bias"] = val[-dim:]
182
+ else:
183
+ orig_state_dict[rename_key(key, base_model)] = val
184
+
185
+ return orig_state_dict
186
+
187
+
188
+ # We will verify our results on an image of cute cats
189
+ def prepare_img():
190
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
191
+ im = Image.open(requests.get(url, stream=True).raw)
192
+ return im
193
+
194
+
195
+ @torch.no_grad()
196
+ def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
197
+ """
198
+ Copy/paste/tweak model's weights to our MobileViT structure.
199
+ """
200
+ config = get_mobilevit_config(mobilevit_name)
201
+
202
+ # load original state_dict
203
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
204
+
205
+ # load 🤗 model
206
+ if mobilevit_name.startswith("deeplabv3_"):
207
+ model = MobileViTForSemanticSegmentation(config).eval()
208
+ else:
209
+ model = MobileViTForImageClassification(config).eval()
210
+
211
+ new_state_dict = convert_state_dict(state_dict, model)
212
+ model.load_state_dict(new_state_dict)
213
+
214
+ # Check outputs on an image, prepared by MobileViTImageProcessor
215
+ image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
216
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
217
+ outputs = model(**encoding)
218
+ logits = outputs.logits
219
+
220
+ if mobilevit_name.startswith("deeplabv3_"):
221
+ assert logits.shape == (1, 21, 32, 32)
222
+
223
+ if mobilevit_name == "deeplabv3_mobilevit_s":
224
+ expected_logits = torch.tensor(
225
+ [
226
+ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
227
+ [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
228
+ [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
229
+ ]
230
+ )
231
+ elif mobilevit_name == "deeplabv3_mobilevit_xs":
232
+ expected_logits = torch.tensor(
233
+ [
234
+ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
235
+ [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
236
+ [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
237
+ ]
238
+ )
239
+ elif mobilevit_name == "deeplabv3_mobilevit_xxs":
240
+ expected_logits = torch.tensor(
241
+ [
242
+ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
243
+ [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
244
+ [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
245
+ ]
246
+ )
247
+ else:
248
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
249
+
250
+ assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
251
+ else:
252
+ assert logits.shape == (1, 1000)
253
+
254
+ if mobilevit_name == "mobilevit_s":
255
+ expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
256
+ elif mobilevit_name == "mobilevit_xs":
257
+ expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
258
+ elif mobilevit_name == "mobilevit_xxs":
259
+ expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
260
+ else:
261
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
262
+
263
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
264
+
265
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
266
+ print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
267
+ model.save_pretrained(pytorch_dump_folder_path)
268
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
269
+ image_processor.save_pretrained(pytorch_dump_folder_path)
270
+
271
+ if push_to_hub:
272
+ model_mapping = {
273
+ "mobilevit_s": "mobilevit-small",
274
+ "mobilevit_xs": "mobilevit-x-small",
275
+ "mobilevit_xxs": "mobilevit-xx-small",
276
+ "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
277
+ "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
278
+ "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
279
+ }
280
+
281
+ print("Pushing to the hub...")
282
+ model_name = model_mapping[mobilevit_name]
283
+ image_processor.push_to_hub(model_name, organization="apple")
284
+ model.push_to_hub(model_name, organization="apple")
285
+
286
+
287
+ if __name__ == "__main__":
288
+ parser = argparse.ArgumentParser()
289
+ # Required parameters
290
+ parser.add_argument(
291
+ "--mobilevit_name",
292
+ default="mobilevit_s",
293
+ type=str,
294
+ help=(
295
+ "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
296
+ " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
297
+ ),
298
+ )
299
+ parser.add_argument(
300
+ "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
301
+ )
302
+ parser.add_argument(
303
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
304
+ )
305
+ parser.add_argument(
306
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
307
+ )
308
+
309
+ args = parser.parse_args()
310
+ convert_movilevit_checkpoint(
311
+ args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
312
+ )
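The converter above is normally driven through its `argparse` CLI; as an illustration only (the checkpoint path and output folder below are placeholders), the same entry point can also be called directly from Python:

```python
# Sketch only: calling the conversion entry point with placeholder paths.
from transformers.models.mobilevit.convert_mlcvnets_to_pytorch import (
    convert_movilevit_checkpoint,  # note: the function name is spelled this way in the source
)

convert_movilevit_checkpoint(
    mobilevit_name="mobilevit_s",
    checkpoint_path="mobilevit_s.pt",               # hypothetical ml-cvnets checkpoint file
    pytorch_dump_folder_path="mobilevit-small-hf",  # hypothetical output directory
    push_to_hub=False,
)
```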