applied-ai-018 committed
Commit a38378e (verified) · 1 parent: f357968

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__init__.py +103 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/convert_bloom_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/configuration_bloom.py +236 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py +255 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/modeling_bloom.py +1243 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/modeling_flax_bloom.py +734 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/tokenization_bloom_fast.py +164 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/__init__.py +94 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py +361 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py +400 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py +1265 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py +2322 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py +1567 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py +255 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py +143 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__init__.py +104 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/configuration_layoutlmv2.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/feature_extraction_layoutlmv2.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/image_processing_layoutlmv2.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/modeling_layoutlmv2.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/processing_layoutlmv2.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2_fast.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/configuration_layoutlmv2.py +222 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py +35 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/modeling_layoutlmv2.py +1407 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/processing_layoutlmv2.py +201 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/tokenization_layoutlmv2.py +1542 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py +793 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py +837 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__init__.py +58 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/__init__.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50.py +354 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50_fast.py +259 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__init__.py ADDED
@@ -0,0 +1,103 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_flax_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
+ }
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_bloom"] = [
+         "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "BloomForCausalLM",
+         "BloomModel",
+         "BloomPreTrainedModel",
+         "BloomForSequenceClassification",
+         "BloomForTokenClassification",
+         "BloomForQuestionAnswering",
+     ]
+
+ try:
+     if not is_flax_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flax_bloom"] = [
+         "FlaxBloomForCausalLM",
+         "FlaxBloomModel",
+         "FlaxBloomPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_bloom_fast import BloomTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_bloom import (
+             BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
+             BloomForCausalLM,
+             BloomForQuestionAnswering,
+             BloomForSequenceClassification,
+             BloomForTokenClassification,
+             BloomModel,
+             BloomPreTrainedModel,
+         )
+
+     try:
+         if not is_flax_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flax_bloom import FlaxBloomForCausalLM, FlaxBloomModel, FlaxBloomPreTrainedModel
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
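
For context, a minimal usage sketch (not part of the diff) of the lazy-import pattern above, assuming a `transformers` installation that ships these Bloom modules: the names registered in `_import_structure` become attributes of `transformers.models.bloom` and are only imported on first access via `_LazyModule`.

```python
# Minimal sketch, assuming `transformers` is installed and exposes these names.
from transformers.models.bloom import BloomConfig  # resolved lazily by _LazyModule

config = BloomConfig()    # tiny default config (hidden_size=64, n_layer=2, n_head=8)
print(config.model_type)  # "bloom"
```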
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.57 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc ADDED
Binary file (8.79 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/convert_bloom_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.27 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc ADDED
Binary file (35.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc ADDED
Binary file (21.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc ADDED
Binary file (5.71 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/configuration_bloom.py ADDED
@@ -0,0 +1,236 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Bloom configuration"""
16
+ from collections import OrderedDict
17
+ from typing import TYPE_CHECKING, Any, List, Mapping, Optional
18
+
19
+ from packaging import version
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ... import PreTrainedTokenizer, TensorType
24
+
25
+ from ...configuration_utils import PretrainedConfig
26
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
27
+ from ...utils import is_torch_available, logging
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ from ..deprecated._archive_maps import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
34
+
35
+
36
+ class BloomConfig(PretrainedConfig):
37
+ """
38
+ This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom
39
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
40
+ defaults will yield a similar configuration to the Bloom architecture
41
+ [bigscience/bloom](https://huggingface.co/bigscience/bloom).
42
+
43
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
44
+ documentation from [`PretrainedConfig`] for more information.
45
+
46
+
47
+ Args:
48
+ vocab_size (`int`, *optional*, defaults to 250880):
49
+ Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented
50
+ by the `inputs_ids` passed when calling [`BloomModel`]. Check [this
51
+ discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the
52
+ `vocab_size` has been defined.
53
+ hidden_size (`int`, *optional*, defaults to 64):
54
+ Dimensionality of the embeddings and hidden states.
55
+ n_layer (`int`, *optional*, defaults to 2):
56
+ Number of hidden layers in the Transformer encoder.
57
+ n_head (`int`, *optional*, defaults to 8):
58
+ Number of attention heads for each attention layer in the Transformer encoder.
59
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
60
+ The epsilon to use in the layer normalization layers.
61
+ initializer_range (`float`, *optional*, defaults to 0.02):
62
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
63
+ apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`):
64
+ If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
65
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
66
+ Dropout rate of the dropout function on the bias dropout.
67
+ attention_dropout (`float`, *optional*, defaults to 0.1):
68
+ Dropout rate applied to the attention probs
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+ pretraining_tp (`int`, *optional*, defaults to `1`):
72
+ Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to [this
73
+ document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
74
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
75
+ issue](https://github.com/pytorch/pytorch/issues/76232). Note also that this is enabled only when
76
+ `slow_but_exact=True`.
77
+ slow_but_exact (`bool`, *optional*, defaults to `False`):
78
+ Experimental feature. Whether to use slow but exact implementation of the attention mechanism. While
79
+ merging the TP rank tensors, due to slicing operations the results may be slightly different between the
80
+ model trained on Megatron and our model. Please refer to [this
81
+ issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to
82
+ enable this feature. Enabling this will hurt the computational time of the inference. Will be probably
83
+ resolved in the future once the main model has been fine-tuned with TP_rank=1.
84
+
85
+ Example:
86
+
87
+ ```python
88
+ >>> from transformers import BloomConfig, BloomModel
89
+
90
+ >>> # Initializing a Bloom configuration
91
+ >>> configuration = BloomConfig()
92
+
93
+ >>> # Initializing a model (with random weights) from the configuration
94
+ >>> model = BloomModel(configuration)
95
+
96
+ >>> # Accessing the model configuration
97
+ >>> configuration = model.config
98
+ ```"""
99
+
100
+ model_type = "bloom"
101
+ keys_to_ignore_at_inference = ["past_key_values"]
102
+ attribute_map = {
103
+ "num_hidden_layers": "n_layer",
104
+ "num_attention_heads": "n_head",
105
+ }
106
+
107
+ def __init__(
108
+ self,
109
+ vocab_size=250880,
110
+ hidden_size=64,
111
+ n_layer=2,
112
+ n_head=8,
113
+ layer_norm_epsilon=1e-5,
114
+ initializer_range=0.02,
115
+ use_cache=True,
116
+ bos_token_id=1,
117
+ eos_token_id=2,
118
+ apply_residual_connection_post_layernorm=False,
119
+ hidden_dropout=0.0,
120
+ attention_dropout=0.0,
121
+ pretraining_tp=1, # TP rank used when training with megatron
122
+ slow_but_exact=False,
123
+ **kwargs,
124
+ ):
125
+ self.vocab_size = vocab_size
126
+ # Backward compatibility with n_embed kwarg
127
+ n_embed = kwargs.pop("n_embed", None)
128
+ self.hidden_size = hidden_size if n_embed is None else n_embed
129
+ self.n_layer = n_layer
130
+ self.n_head = n_head
131
+ self.layer_norm_epsilon = layer_norm_epsilon
132
+ self.initializer_range = initializer_range
133
+ self.use_cache = use_cache
134
+ self.pretraining_tp = pretraining_tp
135
+ self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
136
+ self.hidden_dropout = hidden_dropout
137
+ self.attention_dropout = attention_dropout
138
+
139
+ self.bos_token_id = bos_token_id
140
+ self.eos_token_id = eos_token_id
141
+ self.slow_but_exact = slow_but_exact
142
+
143
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
144
+
145
+
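
A short sketch (not part of the diff) of the `n_embed` backward-compatibility handling and the `attribute_map` aliases defined above; it assumes a standard `transformers` installation and uses made-up sizes.

```python
from transformers import BloomConfig

# `n_embed` is popped from kwargs and used in place of `hidden_size` when given.
config = BloomConfig(n_embed=512)
assert config.hidden_size == 512

# `attribute_map` makes the generic names resolve to the Bloom-specific attributes.
config = BloomConfig(n_layer=4, n_head=16)
assert config.num_hidden_layers == 4 and config.num_attention_heads == 16
```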
146
+ class BloomOnnxConfig(OnnxConfigWithPast):
147
+ torch_onnx_minimum_version = version.parse("1.12")
148
+
149
+ def __init__(
150
+ self,
151
+ config: PretrainedConfig,
152
+ task: str = "default",
153
+ patching_specs: List[PatchingSpec] = None,
154
+ use_past: bool = False,
155
+ ):
156
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
157
+ if not getattr(self._config, "pad_token_id", None):
158
+ # TODO: how to do that better?
159
+ self._config.pad_token_id = 0
160
+
161
+ @property
162
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
163
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
164
+ if self.use_past:
165
+ # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
166
+ self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
167
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
168
+ else:
169
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
170
+
171
+ return common_inputs
172
+
173
+ @property
174
+ def num_layers(self) -> int:
175
+ return self._config.n_layer
176
+
177
+ @property
178
+ def num_attention_heads(self) -> int:
179
+ return self._config.n_head
180
+
181
+ @property
182
+ def atol_for_validation(self) -> float:
183
+ return 1e-3
184
+
185
+ def generate_dummy_inputs(
186
+ self,
187
+ tokenizer: "PreTrainedTokenizer",
188
+ batch_size: int = -1,
189
+ seq_length: int = -1,
190
+ is_pair: bool = False,
191
+ framework: Optional["TensorType"] = None,
192
+ ) -> Mapping[str, Any]:
193
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
194
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
195
+ )
196
+
197
+ # We need to order the inputs in the way they appear in the forward()
198
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
199
+
200
+ # Need to add the past_keys
201
+ if self.use_past:
202
+ if not is_torch_available():
203
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
204
+ else:
205
+ import torch
206
+
207
+ batch, seqlen = common_inputs["input_ids"].shape
208
+ # Not using the same length for past_key_values
209
+ past_key_values_length = seqlen + 2
210
+ head_dim = self._config.hidden_size // self.num_attention_heads
211
+ past_key_shape = (
212
+ batch * self.num_attention_heads,
213
+ head_dim,
214
+ past_key_values_length,
215
+ )
216
+ past_value_shape = (
217
+ batch * self.num_attention_heads,
218
+ past_key_values_length,
219
+ head_dim,
220
+ )
221
+ ordered_inputs["past_key_values"] = [
222
+ (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
223
+ ]
224
+
225
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
226
+ if self.use_past:
227
+ mask_dtype = ordered_inputs["attention_mask"].dtype
228
+ ordered_inputs["attention_mask"] = torch.cat(
229
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
230
+ )
231
+
232
+ return ordered_inputs
233
+
234
+ @property
235
+ def default_onnx_opset(self) -> int:
236
+ return 13
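
A shape-only sketch (not part of the diff) of the dummy `past_key_values` that `BloomOnnxConfig.generate_dummy_inputs` builds above: keys are laid out as `(batch * num_heads, head_dim, past_length)` and values as `(batch * num_heads, past_length, head_dim)`. The sizes below are hypothetical.

```python
import torch

batch, num_heads, head_dim, past_length, n_layer = 2, 8, 8, 5, 2  # made-up sizes

past_key_values = [
    (
        torch.zeros(batch * num_heads, head_dim, past_length),  # key
        torch.zeros(batch * num_heads, past_length, head_dim),  # value
    )
    for _ in range(n_layer)  # one (key, value) pair per layer
]
print(past_key_values[0][0].shape, past_key_values[0][1].shape)
```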
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,255 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert BigScience BLOOM checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+ import re
22
+
23
+ import torch
24
+
25
+ from transformers import BloomConfig, BloomModel
26
+ from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
27
+ from transformers.utils import logging
28
+
29
+
30
+ logging.set_verbosity_info()
31
+
32
+ WEIGHTS_TO_AVERAGE_ENDSWITH = [
33
+ "word_embeddings_layernorm.weight",
34
+ "word_embeddings_layernorm.bias",
35
+ "input_layernorm.weight",
36
+ "input_layernorm.bias",
37
+ "post_attention_layernorm.weight",
38
+ "post_attention_layernorm.bias",
39
+ "self_attention.dense.bias",
40
+ "mlp.dense_4h_to_h.bias",
41
+ "ln_f.weight",
42
+ "ln_f.bias",
43
+ ]
44
+
45
+ WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
46
+ "mlp.dense_4h_to_h.weight",
47
+ "self_attention.dense.weight",
48
+ ]
49
+
50
+
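
The two lists above drive how the conversion merges tensor-parallel shards: weights whose names end with an entry of `WEIGHTS_TO_AVERAGE_ENDSWITH` are averaged, row-parallel weights are concatenated along dim 1, and everything else along dim 0. A toy illustration (not part of the diff) with two hypothetical shards:

```python
import torch

# Two made-up TP shards for three kinds of weights.
ln_shards = [torch.full((4,), 1.0), torch.full((4,), 3.0)]   # layernorm-style -> averaged
col_shards = [torch.randn(4, 6), torch.randn(4, 6)]          # column-parallel -> cat on dim 0
row_shards = [torch.randn(6, 4), torch.randn(6, 4)]          # row-parallel    -> cat on dim 1

merged_ln = sum(ln_shards) / len(ln_shards)   # tensor of 2.0s, shape (4,)
merged_col = torch.cat(col_shards, dim=0)     # shape (8, 6)
merged_row = torch.cat(row_shards, dim=1)     # shape (6, 8)
print(merged_ln, merged_col.shape, merged_row.shape)
```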
51
+ def layer_name_mapping(key, file):
52
+ """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
53
+ # Handle first and last layers
54
+ layer_rename_map = {
55
+ "word_embeddings.weight": "word_embeddings.weight",
56
+ "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
57
+ "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
58
+ "weight": "ln_f.weight",
59
+ "bias": "ln_f.bias",
60
+ }
61
+
62
+ if key in layer_rename_map:
63
+ return layer_rename_map[key]
64
+
65
+ # Handle transformer blocks
66
+ layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
67
+ layer_number -= 3
68
+ return f"h.{layer_number}." + key
69
+
70
+
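
A small worked example (not part of the diff) of the block-index arithmetic in `layer_name_mapping`: the regex pulls the layer number out of the shard filename and the script subtracts 3 so that the first transformer shard maps to `h.0`. The filename below is hypothetical.

```python
import re

file = "layer_04-model_00-model_states.pt"  # hypothetical Megatron-DeepSpeed shard name
layer_number = int(re.match(r".*layer_(\d*).*", file)[1])  # -> 4
print(f"h.{layer_number - 3}.input_layernorm.weight")      # -> "h.1.input_layernorm.weight"
```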
71
+ def get_dtype_size(dtype):
72
+ if dtype == torch.bool:
73
+ return 1 / 8
74
+ bit_search = re.search(r"[^\d](\d+)$", str(dtype))
75
+ if bit_search is None:
76
+ raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
77
+ bit_size = int(bit_search.groups()[0])
78
+ return bit_size // 8
79
+
80
+
81
+ def convert_bloom_checkpoint_to_pytorch(
82
+ bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
83
+ ):
84
+ # Construct model
85
+ if bloom_config_file == "":
86
+ config = BloomConfig()
87
+ else:
88
+ config = BloomConfig.from_json_file(bloom_config_file)
89
+
90
+ if shard_model:
91
+ file_names = os.listdir(bloom_checkpoint_path)
92
+ file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
93
+
94
+ index_dict = {"weight_map": {}, "metadata": {}}
95
+ total_size = 0
96
+
97
+ missing_keys = None
98
+
99
+ config = BloomConfig()
100
+
101
+ for j, file in enumerate(file_names):
102
+ print("Processing file: {}".format(file))
103
+ tensors = None
104
+
105
+ for i in range(pretraining_tp):
106
+ # load all TP files
107
+ f_name = file.replace("model_00", f"model_0{i}")
108
+ temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
109
+
110
+ # Rename keys to the transformers names
111
+ keys = list(temp.keys())
112
+ for key in keys:
113
+ temp[layer_name_mapping(key, file)] = temp.pop(key)
114
+
115
+ if tensors is None:
116
+ tensors = temp
117
+ else:
118
+ for key in tensors.keys():
119
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
120
+ # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
121
+ tensors[key] += temp[key]
122
+ else:
123
+ # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
124
+ cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
125
+ # We concatenate these weights across TP ranks
126
+ tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
127
+
128
+ # Divide the weights we want to average by the number of TP ranks
129
+ for key in tensors.keys():
130
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
131
+ tensors[key] = tensors[key] / pretraining_tp
132
+ torch.save(
133
+ tensors,
134
+ os.path.join(
135
+ pytorch_dump_folder_path,
136
+ "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
137
+ ),
138
+ )
139
+
140
+ for key in tensors.keys():
141
+ value = tensors[key]
142
+ total_size += value.numel() * get_dtype_size(value.dtype)
143
+ if key not in index_dict["weight_map"]:
144
+ index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
145
+ str(j + 1).zfill(5), str(len(file_names)).zfill(5)
146
+ )
147
+
148
+ config = BloomConfig()
149
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
150
+ index_dict["metadata"]["total_size"] = total_size
151
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
152
+ f.write(config.to_json_string())
153
+ with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
154
+ json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
155
+ f.write(json_config)
156
+ else:
157
+ model = BloomModel(config)
158
+
159
+ file_names = os.listdir(bloom_checkpoint_path)
160
+ file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
161
+
162
+ missing_keys = None
163
+ for i, file in enumerate(file_names):
164
+ tensors = None
165
+ for i in range(pretraining_tp):
166
+ # load all TP files
167
+ f_name = file.replace("model_00", f"model_0{i}")
168
+ temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
169
+
170
+ # Rename keys to the transformers names
171
+ keys = list(temp.keys())
172
+ for key in keys:
173
+ temp[layer_name_mapping(key, file)] = temp.pop(key)
174
+
175
+ if tensors is None:
176
+ tensors = temp
177
+ else:
178
+ for key in tensors.keys():
179
+ # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
180
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
181
+ tensors[key] += temp[key]
182
+ else:
183
+ # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
184
+ cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
185
+ # We concatenate these weights across TP ranks
186
+ tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
187
+
188
+ # Divide the weights we want to average by the number of TP ranks
189
+ for key in tensors.keys():
190
+ if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
191
+ tensors[key] = tensors[key] / pretraining_tp
192
+
193
+ other_keys = model.load_state_dict(tensors, strict=False)
194
+ assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
195
+ if missing_keys is None:
196
+ missing_keys = set(other_keys.missing_keys)
197
+ else:
198
+ missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
199
+
200
+ assert not missing_keys, f"The keys {missing_keys} are missing"
201
+
202
+ # Save pytorch-model
203
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
204
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
205
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
206
+ print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
207
+ if config.torch_dtype is not None:
208
+ model = model.to(config.torch_dtype)
209
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
210
+ print(f"Save configuration file to {pytorch_config_dump_path}")
211
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
212
+ f.write(config.to_json_string())
213
+
214
+
215
+ if __name__ == "__main__":
216
+ parser = argparse.ArgumentParser()
217
+ # Required parameters
218
+ parser.add_argument(
219
+ "--bloom_checkpoint_path",
220
+ default=None,
221
+ type=str,
222
+ required=True,
223
+ help="Path to the Megatron-LM checkpoint path.",
224
+ )
225
+ parser.add_argument(
226
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
227
+ )
228
+ parser.add_argument(
229
+ "--bloom_config_file",
230
+ default="",
231
+ type=str,
232
+ help=(
233
+ "An optional config json file corresponding to the pre-trained model. \n"
234
+ "This specifies the model architecture."
235
+ ),
236
+ )
237
+ parser.add_argument(
238
+ "--shard_model",
239
+ action="store_true",
240
+ help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
241
+ )
242
+ parser.add_argument(
243
+ "--pretraining_tp",
244
+ default=4,
245
+ type=int,
246
+ help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
247
+ )
248
+ args = parser.parse_args()
249
+ convert_bloom_checkpoint_to_pytorch(
250
+ args.bloom_checkpoint_path,
251
+ args.bloom_config_file,
252
+ args.pytorch_dump_folder_path,
253
+ args.shard_model,
254
+ args.pretraining_tp,
255
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/modeling_bloom.py ADDED
@@ -0,0 +1,1243 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 HuggingFace Inc. team and BigScience workshop.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch BLOOM model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
25
+ from torch.nn import functional as F
26
+
27
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
28
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import logging
38
+ from .configuration_bloom import BloomConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "bigscience/bloom-560m"
44
+ _CONFIG_FOR_DOC = "BloomConfig"
45
+
46
+
47
+ from ..deprecated._archive_maps import BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
51
+ """
52
+ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it
53
+ relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value
54
+ `softmax(l+a) = softmax(l)`. Based on
55
+ https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
56
+ TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
57
+
58
+ Args:
59
+ Returns tensor shaped (batch_size * num_heads, 1, max_seq_len)
60
+ attention_mask (`torch.Tensor`):
61
+ Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
62
+ num_heads (`int`, *required*):
63
+ number of heads
64
+ dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
65
+ dtype of the output tensor
66
+ """
67
+ batch_size, seq_length = attention_mask.shape
68
+ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
69
+ base = torch.tensor(
70
+ 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
71
+ )
72
+ powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
73
+ slopes = torch.pow(base, powers)
74
+
75
+ if closest_power_of_2 != num_heads:
76
+ extra_base = torch.tensor(
77
+ 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
78
+ )
79
+ num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
80
+ extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
81
+ slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
82
+
83
+ # Note: alibi will be added to the attention bias that will be applied to the query-key product of attention
84
+ # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
85
+ # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
86
+ # => the query_length dimension will then be broadcasted correctly
87
+ # This is more or less identical to T5's relative position bias:
88
+ # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
89
+ arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
90
+ alibi = slopes[..., None] * arange_tensor
91
+ return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
92
+
93
+
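
A quick numeric check (not part of the diff) of the slope formula in `build_alibi_tensor` above, for the simple case where the head count is a power of two, so only the first branch runs:

```python
import math
import torch

num_heads = 8  # power of two, so no extra slopes are needed
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
base = 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3)))  # 0.5 for 8 heads
slopes = torch.pow(torch.tensor(base), torch.arange(1, 1 + closest_power_of_2))
print(slopes)  # 0.5, 0.25, ..., 1/256 -- the usual ALiBi slopes for 8 heads

# The per-position offsets are the cumulative positions of unmasked tokens:
attention_mask = torch.tensor([[1, 1, 1, 1]])
print(((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :])  # [[[0, 1, 2, 3]]]
```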
94
+ def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
95
+ """
96
+ Dropout add function
97
+
98
+ Args:
99
+ x (`torch.tensor`, *required*):
100
+ input tensor
101
+ residual (`torch.tensor`, *required*):
102
+ residual tensor
103
+ prob (`float`, *required*):
104
+ dropout probability
105
+ training (`bool`, *required*):
106
+ training mode
107
+ """
108
+ out = F.dropout(x, p=prob, training=training)
109
+ out = residual + out
110
+ return out
111
+
112
+
113
+ def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor:
114
+ """
115
+ Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to
116
+ make the model jitable.
117
+
118
+ Args:
119
+ x (`torch.tensor`, *required*):
120
+ input hidden states
121
+ """
122
+ return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
123
+
124
+
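
A sanity-check sketch (not part of the diff): `bloom_gelu_forward` is the standard tanh approximation of GELU (0.79788456 ≈ sqrt(2/π)), so on a recent PyTorch it should agree with `F.gelu(..., approximate="tanh")` to float tolerance.

```python
import torch
import torch.nn.functional as F

def bloom_gelu_forward(x):
    # same formula as above; 0.79788456 ~ sqrt(2 / pi)
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))

x = torch.linspace(-4.0, 4.0, steps=9)
print(torch.allclose(bloom_gelu_forward(x), F.gelu(x, approximate="tanh"), atol=1e-5))  # True
```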
125
+ def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
126
+ """
127
+ gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) +
128
+ 0.3989423 * x * torch.exp(-0.5 * x * x)
129
+
130
+ Args:
131
+ g (`torch.tensor`, *required*):
132
+ gradient output tensor
133
+ x (`torch.tensor`, *required*):
134
+ input tensor
135
+ """
136
+ x = x[0] # x is a tuple of 1 element, needs to unpack it first
137
+ tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
138
+ # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
139
+ ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
140
+ return ff * g
141
+
142
+
143
+ class GeLUFunction(torch.autograd.Function):
144
+ @staticmethod
145
+ def forward(ctx, input: torch.Tensor) -> torch.Tensor:
146
+ ctx.save_for_backward(input)
147
+ return bloom_gelu_forward(input)
148
+
149
+ @staticmethod
150
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
151
+ input = ctx.saved_tensors
152
+ tmp = bloom_gelu_back(grad_output, input)
153
+ return tmp
154
+
155
+
156
+ class BloomGelu(nn.Module):
157
+ """
158
+ BloomBiasGelu wrapper function that makes use of the simple function in inference mode to make the model
159
+ torchscriptable and uses the autograd function in training mode to get accurate gradients. Partly
160
+ copied from Megatron-DeepSpeed code and adapted for our needs
161
+
162
+ See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329
163
+ """
164
+
165
+ def __init__(self):
166
+ super().__init__()
167
+
168
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
169
+ if self.training:
170
+ return GeLUFunction.apply(x)
171
+ else:
172
+ return bloom_gelu_forward(x)
173
+
174
+
175
+ class BloomAttention(nn.Module):
176
+ def __init__(self, config: BloomConfig):
177
+ super().__init__()
178
+
179
+ self.pretraining_tp = config.pretraining_tp
180
+ self.slow_but_exact = config.slow_but_exact
181
+
182
+ self.hidden_size = config.hidden_size
183
+ self.num_heads = config.n_head
184
+ self.head_dim = self.hidden_size // self.num_heads
185
+ self.split_size = self.hidden_size
186
+ self.hidden_dropout = config.hidden_dropout
187
+
188
+ if self.head_dim * self.num_heads != self.hidden_size:
189
+ raise ValueError(
190
+ f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
191
+ f" {self.num_heads})."
192
+ )
193
+
194
+ # Layer-wise attention scaling
195
+ self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
196
+ self.beta = 1.0
197
+
198
+ self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
199
+ self.dense = nn.Linear(self.hidden_size, self.hidden_size)
200
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
201
+
202
+ def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
203
+ """
204
+ Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
205
+ storage as `fused_qkv`
206
+
207
+ Args:
208
+ fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]
209
+
210
+ Returns:
211
+ query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
212
+ value: [batch_size, seq_length, num_heads, head_dim]
213
+ """
214
+ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
215
+ fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
216
+ return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
217
+
218
+ def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
219
+ """
220
+ Merge heads together over the last dimension
221
+
222
+ Args:
223
+ x (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim]
224
+
225
+ Returns:
226
+ torch.tensor: [batch_size, seq_length, num_heads * head_dim]
227
+ """
228
+ # What we want to achieve is:
229
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim
230
+ batch_size_and_num_heads, seq_length, _ = x.shape
231
+ batch_size = batch_size_and_num_heads // self.num_heads
232
+
233
+ # First view to decompose the batch size
234
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim
235
+ x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
236
+
237
+ # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
238
+ x = x.permute(0, 2, 1, 3)
239
+
240
+ # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim
241
+ return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
242
+
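
A standalone shape walk-through (not part of the diff) of the reshapes that `_split_heads` and `_merge_heads` perform, using small made-up sizes:

```python
import torch

batch_size, seq_length, num_heads, head_dim = 2, 5, 4, 8  # made-up sizes

# _split_heads: [batch, seq, num_heads * 3 * head_dim] -> three [batch, seq, heads, head_dim] views
fused_qkv = torch.randn(batch_size, seq_length, num_heads * 3 * head_dim)
fused_qkv = fused_qkv.view(batch_size, seq_length, num_heads, 3, head_dim)
query, key, value = fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
print(query.shape)  # torch.Size([2, 5, 4, 8])

# _merge_heads: [batch * heads, seq, head_dim] -> [batch, seq, heads * head_dim]
context = torch.randn(batch_size * num_heads, seq_length, head_dim)
merged = (
    context.view(batch_size, num_heads, seq_length, head_dim)
    .permute(0, 2, 1, 3)
    .reshape(batch_size, seq_length, num_heads * head_dim)
)
print(merged.shape)  # torch.Size([2, 5, 32])
```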
243
+ def forward(
244
+ self,
245
+ hidden_states: torch.Tensor,
246
+ residual: torch.Tensor,
247
+ alibi: torch.Tensor,
248
+ attention_mask: torch.Tensor,
249
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
250
+ head_mask: Optional[torch.Tensor] = None,
251
+ use_cache: bool = False,
252
+ output_attentions: bool = False,
253
+ ):
254
+ fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size]
255
+
256
+ # 3 x [batch_size, seq_length, num_heads, head_dim]
257
+ (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
258
+
259
+ batch_size, q_length, _, _ = query_layer.shape
260
+
261
+ query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
262
+ key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * self.num_heads, self.head_dim, q_length)
263
+ value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
264
+ if layer_past is not None:
265
+ past_key, past_value = layer_past
266
+ # concatenate along seq_length dimension:
267
+ # - key: [batch_size * self.num_heads, head_dim, kv_length]
268
+ # - value: [batch_size * self.num_heads, kv_length, head_dim]
269
+ key_layer = torch.cat((past_key, key_layer), dim=2)
270
+ value_layer = torch.cat((past_value, value_layer), dim=1)
271
+
272
+ _, _, kv_length = key_layer.shape
273
+
274
+ if use_cache is True:
275
+ present = (key_layer, value_layer)
276
+ else:
277
+ present = None
278
+
279
+ # [batch_size * num_heads, q_length, kv_length]
280
+ # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11
281
+ matmul_result = alibi.baddbmm(
282
+ batch1=query_layer,
283
+ batch2=key_layer,
284
+ beta=self.beta,
285
+ alpha=self.inv_norm_factor,
286
+ )
287
+
288
+ # change view to [batch_size, num_heads, q_length, kv_length]
289
+ attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length)
290
+
291
+ # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length]
292
+ input_dtype = attention_scores.dtype
293
+ # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38`
294
+ if input_dtype == torch.float16:
295
+ attention_scores = attention_scores.to(torch.float)
296
+ attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min)
297
+ attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype)
298
+
299
+ # [batch_size, num_heads, q_length, kv_length]
300
+ attention_probs = self.attention_dropout(attention_probs)
301
+
302
+ if head_mask is not None:
303
+ attention_probs = attention_probs * head_mask
304
+
305
+ # change view [batch_size x num_heads, q_length, kv_length]
306
+ attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, kv_length)
307
+
308
+ # matmul: [batch_size * num_heads, q_length, head_dim]
309
+ context_layer = torch.bmm(attention_probs_reshaped, value_layer)
310
+
311
+ # change view [batch_size, q_length, num_heads * head_dim]
312
+ context_layer = self._merge_heads(context_layer)
313
+
314
+ # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232
315
+ if self.pretraining_tp > 1 and self.slow_but_exact:
316
+ slices = self.hidden_size / self.pretraining_tp
317
+ output_tensor = torch.zeros_like(context_layer)
318
+ for i in range(self.pretraining_tp):
319
+ output_tensor = output_tensor + F.linear(
320
+ context_layer[:, :, int(i * slices) : int((i + 1) * slices)],
321
+ self.dense.weight[:, int(i * slices) : int((i + 1) * slices)],
322
+ )
323
+ else:
324
+ output_tensor = self.dense(context_layer)
325
+
326
+ output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
327
+
328
+ outputs = (output_tensor, present)
329
+ if output_attentions:
330
+ outputs += (attention_probs,)
331
+
332
+ return outputs
333
+
334
+
335
+ class BloomMLP(nn.Module):
336
+ def __init__(self, config: BloomConfig):
337
+ super().__init__()
338
+ hidden_size = config.hidden_size
339
+
340
+ self.pretraining_tp = config.pretraining_tp
341
+ self.slow_but_exact = config.slow_but_exact
342
+ self.dense_h_to_4h = nn.Linear(hidden_size, 4 * hidden_size)
343
+ self.gelu_impl = BloomGelu()
344
+ self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size)
345
+ self.hidden_dropout = config.hidden_dropout
346
+
347
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
348
+ hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))
349
+
350
+ if self.pretraining_tp > 1 and self.slow_but_exact:
351
+ intermediate_output = torch.zeros_like(residual)
352
+ slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp
353
+ for i in range(self.pretraining_tp):
354
+ intermediate_output = intermediate_output + F.linear(
355
+ hidden_states[:, :, int(i * slices) : int((i + 1) * slices)],
356
+ self.dense_4h_to_h.weight[:, int(i * slices) : int((i + 1) * slices)],
357
+ )
358
+ else:
359
+ intermediate_output = self.dense_4h_to_h(hidden_states)
360
+
361
+ output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training)
362
+
363
+ return output
364
+
365
+
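
The `slow_but_exact` branches in `BloomAttention` and `BloomMLP` above rebuild a dense projection by summing `F.linear` over matching column slices of the input and the weight. A toy check (not part of the diff; made-up sizes, no bias) that this equals the single full matmul up to rounding:

```python
import torch
import torch.nn.functional as F

hidden, out_features, tp = 8, 6, 2          # made-up sizes; tp plays the role of pretraining_tp
x = torch.randn(3, 4, hidden)               # [batch, seq, hidden]
weight = torch.randn(out_features, hidden)

full = F.linear(x, weight)

slices = hidden / tp
sliced = torch.zeros_like(full)
for i in range(tp):
    sliced = sliced + F.linear(
        x[:, :, int(i * slices) : int((i + 1) * slices)],
        weight[:, int(i * slices) : int((i + 1) * slices)],
    )

print(torch.allclose(full, sliced, atol=1e-5))  # True
```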
366
+ class BloomBlock(nn.Module):
367
+ def __init__(self, config: BloomConfig):
368
+ super().__init__()
369
+ hidden_size = config.hidden_size
370
+
371
+ self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
372
+ self.num_heads = config.n_head
373
+ self.self_attention = BloomAttention(config)
374
+ self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
375
+
376
+ self.mlp = BloomMLP(config)
377
+
378
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
379
+ self.hidden_dropout = config.hidden_dropout
380
+
381
+ def forward(
382
+ self,
383
+ hidden_states: torch.Tensor,
384
+ alibi: torch.Tensor,
385
+ attention_mask: torch.Tensor,
386
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
387
+ head_mask: Optional[torch.Tensor] = None,
388
+ use_cache: bool = False,
389
+ output_attentions: bool = False,
390
+ ):
391
+ # hidden_states: [batch_size, seq_length, hidden_size]
392
+
393
+ # Layer norm at the beginning of the transformer layer.
394
+ layernorm_output = self.input_layernorm(hidden_states)
395
+
396
+ # Layer norm post the self attention.
397
+ if self.apply_residual_connection_post_layernorm:
398
+ residual = layernorm_output
399
+ else:
400
+ residual = hidden_states
401
+
402
+ # Self attention.
403
+ attn_outputs = self.self_attention(
404
+ layernorm_output,
405
+ residual,
406
+ layer_past=layer_past,
407
+ attention_mask=attention_mask,
408
+ alibi=alibi,
409
+ head_mask=head_mask,
410
+ use_cache=use_cache,
411
+ output_attentions=output_attentions,
412
+ )
413
+
414
+ attention_output = attn_outputs[0]
415
+
416
+ outputs = attn_outputs[1:]
417
+
418
+ layernorm_output = self.post_attention_layernorm(attention_output)
419
+
420
+ # Get residual
421
+ if self.apply_residual_connection_post_layernorm:
422
+ residual = layernorm_output
423
+ else:
424
+ residual = attention_output
425
+
426
+ # MLP.
427
+ output = self.mlp(layernorm_output, residual)
428
+
429
+ if use_cache:
430
+ outputs = (output,) + outputs
431
+ else:
432
+ outputs = (output,) + outputs[1:]
433
+
434
+ return outputs # hidden_states, present, attentions
435
+
436
+
437
+ class BloomPreTrainedModel(PreTrainedModel):
438
+ config_class = BloomConfig
439
+ base_model_prefix = "transformer"
440
+ supports_gradient_checkpointing = True
441
+ _no_split_modules = ["BloomBlock"]
442
+ _skip_keys_device_placement = "past_key_values"
443
+
444
+ def __init__(self, *inputs, **kwargs):
445
+ super().__init__(*inputs, **kwargs)
446
+
447
+ def _init_weights(self, module: nn.Module):
448
+ """Initialize the weights."""
449
+ if isinstance(module, nn.Linear):
450
+ # Slightly different from the TF version which uses truncated_normal for initialization
451
+ # cf https://github.com/pytorch/pytorch/pull/5617
452
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
453
+ if module.bias is not None:
454
+ module.bias.data.zero_()
455
+ elif isinstance(module, nn.Embedding):
456
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
457
+ if module.padding_idx is not None:
458
+ module.weight.data[module.padding_idx].zero_()
459
+ elif isinstance(module, LayerNorm):
460
+ module.bias.data.zero_()
461
+ module.weight.data.fill_(1.0)
462
+
463
+ @staticmethod
464
+ def _convert_to_standard_cache(
465
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int
466
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
467
+ """
468
+ Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size,
469
+ num_heads, ...]))
470
+ """
471
+ batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape
472
+ num_heads = batch_size_times_num_heads // batch_size
473
+ # key: [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length]
474
+ # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
475
+ return tuple(
476
+ (
477
+ layer_past[0].view(batch_size, num_heads, head_dim, seq_length),
478
+ layer_past[1].view(batch_size, num_heads, seq_length, head_dim),
479
+ )
480
+ for layer_past in past_key_value
481
+ )
482
+
483
+ @staticmethod
484
+ def _convert_to_bloom_cache(
485
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]],
486
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
487
+ """
488
+ Converts the cache to the format expected by Bloom, i.e. to tuple(tuple([batch_size * num_heads, ...]))
489
+ """
490
+ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
491
+ batch_size_times_num_heads = batch_size * num_heads
492
+ # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
493
+ # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
494
+ return tuple(
495
+ (
496
+ layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length),
497
+ layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim),
498
+ )
499
+ for layer_past in past_key_value
500
+ )
501
+
502
+
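
A shape-only sketch (not part of the diff) of the cache round trip performed by `_convert_to_standard_cache` and `_convert_to_bloom_cache`: both directions are plain `view`s between the fused `(batch * num_heads, ...)` layout and the standard `(batch, num_heads, ...)` layout. Sizes are made up.

```python
import torch

batch_size, num_heads, head_dim, seq_length = 2, 4, 8, 5  # made-up sizes

bloom_key = torch.randn(batch_size * num_heads, head_dim, seq_length)
bloom_value = torch.randn(batch_size * num_heads, seq_length, head_dim)

# Bloom layout -> standard layout
std_key = bloom_key.view(batch_size, num_heads, head_dim, seq_length)
std_value = bloom_value.view(batch_size, num_heads, seq_length, head_dim)

# Standard layout -> Bloom layout; the round trip is lossless.
assert torch.equal(std_key.view(batch_size * num_heads, head_dim, seq_length), bloom_key)
assert torch.equal(std_value.view(batch_size * num_heads, seq_length, head_dim), bloom_value)
print(std_key.shape, std_value.shape)
```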
503
+ BLOOM_START_DOCSTRING = r"""
504
+
505
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
506
+ library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)
507
+
508
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
509
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
510
+ and behavior.
511
+
512
+ Parameters:
513
+ config ([`BloomConfig`]): Model configuration class with all the parameters of the model.
514
+ Initializing with a config file does not load the weights associated with the model, only the
515
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
516
+ """
517
+
518
+ BLOOM_INPUTS_DOCSTRING = r"""
519
+ Args:
520
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
521
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
522
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
523
+
524
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
525
+ `input_ids`.
526
+
527
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
528
+ [`PreTrainedTokenizer.__call__`] for details.
529
+
530
+ [What are input IDs?](../glossary#input-ids)
531
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
532
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
533
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
534
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
535
+
536
+ Each element of `past_key_values` is a tuple (past_key, past_value):
537
+ - past_key: [batch_size * num_heads, head_dim, kv_length]
538
+ - past_value: [batch_size * num_heads, kv_length, head_dim]
539
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
540
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
541
+
542
+ - 1 for tokens that are **not masked**,
543
+ - 0 for tokens that are **masked**.
544
+
545
+ [What are attention masks?](../glossary#attention-mask)
546
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
547
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
548
+
549
+ - 1 indicates the head is **not masked**,
550
+ - 0 indicates the head is **masked**.
551
+
552
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
553
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
554
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
555
+ model's internal embedding lookup matrix.
556
+
557
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
558
+ `past_key_values`).
559
+ use_cache (`bool`, *optional*):
560
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
561
+ `past_key_values`).
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
564
+ tensors for more detail.
565
+ output_hidden_states (`bool`, *optional*):
566
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
567
+ more detail.
568
+ return_dict (`bool`, *optional*):
569
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
570
+ """
571
+
572
+
573
+ @add_start_docstrings(
574
+ "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.",
575
+ BLOOM_START_DOCSTRING,
576
+ )
577
+ class BloomModel(BloomPreTrainedModel):
578
+ def __init__(self, config: BloomConfig):
579
+ super().__init__(config)
580
+
581
+ self.embed_dim = config.hidden_size
582
+ self.num_heads = config.n_head
583
+
584
+ # Embedding + LN Embedding
585
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
586
+ self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
587
+
588
+ # Transformer blocks
589
+ self.h = nn.ModuleList([BloomBlock(config) for _ in range(config.num_hidden_layers)])
590
+
591
+ # Final Layer Norm
592
+ self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
593
+
594
+ self.gradient_checkpointing = False
595
+
596
+ # Initialize weights and apply final processing
597
+ self.post_init()
598
+
599
+ def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
600
+ return build_alibi_tensor(attention_mask, num_heads, dtype)
601
+
602
+ def get_input_embeddings(self):
603
+ return self.word_embeddings
604
+
605
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
606
+ self.word_embeddings = new_embeddings
607
+
608
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
609
+ @add_code_sample_docstrings(
610
+ checkpoint=_CHECKPOINT_FOR_DOC,
611
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
612
+ config_class=_CONFIG_FOR_DOC,
613
+ )
614
+ def forward(
615
+ self,
616
+ input_ids: Optional[torch.LongTensor] = None,
617
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
618
+ attention_mask: Optional[torch.Tensor] = None,
619
+ head_mask: Optional[torch.LongTensor] = None,
620
+ inputs_embeds: Optional[torch.LongTensor] = None,
621
+ use_cache: Optional[bool] = None,
622
+ output_attentions: Optional[bool] = None,
623
+ output_hidden_states: Optional[bool] = None,
624
+ return_dict: Optional[bool] = None,
625
+ **deprecated_arguments,
626
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
627
+ if deprecated_arguments.pop("position_ids", False) is not False:
628
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
629
+ warnings.warn(
630
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
631
+ " passing `position_ids`.",
632
+ FutureWarning,
633
+ )
634
+ if len(deprecated_arguments) > 0:
635
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
636
+
637
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
638
+ output_hidden_states = (
639
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
640
+ )
641
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
642
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
643
+
644
+ if input_ids is not None and inputs_embeds is not None:
645
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
646
+ elif input_ids is not None:
647
+ batch_size, seq_length = input_ids.shape
648
+ elif inputs_embeds is not None:
649
+ batch_size, seq_length, _ = inputs_embeds.shape
650
+ else:
651
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
652
+
653
+ if past_key_values is None:
654
+ past_key_values = tuple([None] * len(self.h))
655
+
656
+ # Prepare head mask if needed
657
+ # 1.0 in head_mask indicates we keep the head
658
+ # attention_probs has shape batch_size x num_heads x N x N
659
+ # head_mask has shape n_layer x batch x num_heads x N x N
660
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
661
+
662
+ if inputs_embeds is None:
663
+ inputs_embeds = self.word_embeddings(input_ids)
664
+
665
+ hidden_states = self.word_embeddings_layernorm(inputs_embeds)
666
+
667
+ presents = () if use_cache else None
668
+ all_self_attentions = () if output_attentions else None
669
+ all_hidden_states = () if output_hidden_states else None
670
+
671
+ if self.gradient_checkpointing and self.training:
672
+ if use_cache:
673
+ logger.warning_once(
674
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
675
+ )
676
+ use_cache = False
677
+
678
+ # Compute alibi tensor: check build_alibi_tensor documentation
679
+ seq_length_with_past = seq_length
680
+ past_key_values_length = 0
681
+ if past_key_values[0] is not None:
682
+ past_key_values_length = past_key_values[0][0].shape[2]
683
+ seq_length_with_past = seq_length_with_past + past_key_values_length
684
+ if attention_mask is None:
685
+ attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
686
+ else:
687
+ attention_mask = attention_mask.to(hidden_states.device)
688
+
689
+ alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype)
690
+
691
+ causal_mask = _prepare_4d_causal_attention_mask(
692
+ attention_mask,
693
+ input_shape=(batch_size, seq_length),
694
+ inputs_embeds=inputs_embeds,
695
+ past_key_values_length=past_key_values_length,
696
+ )
697
+ causal_mask = causal_mask.bool()
698
+
699
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
700
+ if output_hidden_states:
701
+ all_hidden_states = all_hidden_states + (hidden_states,)
702
+
703
+ if self.gradient_checkpointing and self.training:
704
+ outputs = self._gradient_checkpointing_func(
705
+ block.__call__,
706
+ hidden_states,
707
+ alibi,
708
+ causal_mask,
709
+ layer_past,
710
+ head_mask[i],
711
+ use_cache,
712
+ output_attentions,
713
+ )
714
+ else:
715
+ outputs = block(
716
+ hidden_states,
717
+ layer_past=layer_past,
718
+ attention_mask=causal_mask,
719
+ head_mask=head_mask[i],
720
+ use_cache=use_cache,
721
+ output_attentions=output_attentions,
722
+ alibi=alibi,
723
+ )
724
+
725
+ hidden_states = outputs[0]
726
+ if use_cache is True:
727
+ presents = presents + (outputs[1],)
728
+
729
+ if output_attentions:
730
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
731
+
732
+ # Add last hidden state
733
+ hidden_states = self.ln_f(hidden_states)
734
+
735
+ if output_hidden_states:
736
+ all_hidden_states = all_hidden_states + (hidden_states,)
737
+
738
+ if not return_dict:
739
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
740
+
741
+ return BaseModelOutputWithPastAndCrossAttentions(
742
+ last_hidden_state=hidden_states,
743
+ past_key_values=presents,
744
+ hidden_states=all_hidden_states,
745
+ attentions=all_self_attentions,
746
+ )
747
+
748
+
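As a usage illustration for the bare model above (not part of the diff; the checkpoint name is only an example, any BLOOM checkpoint behaves the same way):

import torch
from transformers import AutoTokenizer, BloomModel

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = BloomModel.from_pretrained("bigscience/bloom-560m")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, use_cache=True)

print(outputs.last_hidden_state.shape)      # [batch_size, seq_length, hidden_size]
print(outputs.past_key_values[0][0].shape)  # Bloom cache layout: [batch_size * num_heads, head_dim, seq_length]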
749
+ @add_start_docstrings(
750
+ """
751
+ The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input
752
+ embeddings).
753
+ """,
754
+ BLOOM_START_DOCSTRING,
755
+ )
756
+ class BloomForCausalLM(BloomPreTrainedModel):
757
+ _tied_weights_keys = ["lm_head.weight"]
758
+
759
+ def __init__(self, config: BloomConfig):
760
+ super().__init__(config)
761
+ self.transformer = BloomModel(config)
762
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
763
+
764
+ # Initialize weights and apply final processing
765
+ self.post_init()
766
+
767
+ def get_output_embeddings(self):
768
+ return self.lm_head
769
+
770
+ def set_output_embeddings(self, new_embeddings: torch.Tensor):
771
+ self.lm_head = new_embeddings
772
+
773
+ def prepare_inputs_for_generation(
774
+ self,
775
+ input_ids: torch.LongTensor,
776
+ past_key_values: Optional[torch.Tensor] = None,
777
+ attention_mask: Optional[torch.Tensor] = None,
778
+ inputs_embeds: Optional[torch.Tensor] = None,
779
+ **kwargs,
780
+ ) -> dict:
781
+ # only last tokens for input_ids if past is not None
782
+ if past_key_values is not None:
783
+ past_length = past_key_values[0][0].shape[2]
784
+
785
+ # Some generation methods already pass only the last input ID
786
+ if input_ids.shape[1] > past_length:
787
+ remove_prefix_length = past_length
788
+ else:
789
+ # Default to old behavior: keep only final ID
790
+ remove_prefix_length = input_ids.shape[1] - 1
791
+
792
+ input_ids = input_ids[:, remove_prefix_length:]
793
+
794
+ # the cache may be in the standard format (e.g. in contrastive search), convert to Bloom's format if needed
795
+ if past_key_values[0][0].shape[0] == input_ids.shape[0]:
796
+ past_key_values = self._convert_to_bloom_cache(past_key_values)
797
+
798
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
799
+ if inputs_embeds is not None and past_key_values is None:
800
+ model_inputs = {"inputs_embeds": inputs_embeds}
801
+ else:
802
+ model_inputs = {"input_ids": input_ids}
803
+
804
+ model_inputs.update(
805
+ {
806
+ "past_key_values": past_key_values,
807
+ "use_cache": kwargs.get("use_cache"),
808
+ "attention_mask": attention_mask,
809
+ }
810
+ )
811
+ return model_inputs
812
+
813
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
814
+ @add_code_sample_docstrings(
815
+ checkpoint=_CHECKPOINT_FOR_DOC,
816
+ output_type=CausalLMOutputWithCrossAttentions,
817
+ config_class=_CONFIG_FOR_DOC,
818
+ )
819
+ def forward(
820
+ self,
821
+ input_ids: Optional[torch.LongTensor] = None,
822
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ head_mask: Optional[torch.Tensor] = None,
825
+ inputs_embeds: Optional[torch.Tensor] = None,
826
+ labels: Optional[torch.Tensor] = None,
827
+ use_cache: Optional[bool] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ output_hidden_states: Optional[bool] = None,
830
+ return_dict: Optional[bool] = None,
831
+ **deprecated_arguments,
832
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
833
+ r"""
834
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
835
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
836
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
837
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
838
+ """
839
+ if deprecated_arguments.pop("position_ids", False) is not False:
840
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
841
+ warnings.warn(
842
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
843
+ " passing `position_ids`.",
844
+ FutureWarning,
845
+ )
846
+ if len(deprecated_arguments) > 0:
847
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
848
+
849
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
850
+
851
+ transformer_outputs = self.transformer(
852
+ input_ids,
853
+ past_key_values=past_key_values,
854
+ attention_mask=attention_mask,
855
+ head_mask=head_mask,
856
+ inputs_embeds=inputs_embeds,
857
+ use_cache=use_cache,
858
+ output_attentions=output_attentions,
859
+ output_hidden_states=output_hidden_states,
860
+ return_dict=return_dict,
861
+ )
862
+ hidden_states = transformer_outputs[0]
863
+
864
+ lm_logits = self.lm_head(hidden_states)
865
+
866
+ loss = None
867
+ if labels is not None:
868
+ # move labels to correct device to enable model parallelism
869
+ labels = labels.to(lm_logits.device)
870
+ # Shift so that tokens < n predict n
871
+ shift_logits = lm_logits[..., :-1, :].contiguous()
872
+ shift_labels = labels[..., 1:].contiguous()
873
+ batch_size, seq_length, vocab_size = shift_logits.shape
874
+ # Flatten the tokens
875
+ loss_fct = CrossEntropyLoss()
876
+ loss = loss_fct(
877
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
878
+ )
879
+
880
+ if not return_dict:
881
+ output = (lm_logits,) + transformer_outputs[1:]
882
+ return ((loss,) + output) if loss is not None else output
883
+
884
+ return CausalLMOutputWithCrossAttentions(
885
+ loss=loss,
886
+ logits=lm_logits,
887
+ past_key_values=transformer_outputs.past_key_values,
888
+ hidden_states=transformer_outputs.hidden_states,
889
+ attentions=transformer_outputs.attentions,
890
+ )
891
+
892
+ def _reorder_cache(
893
+ self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
894
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
895
+ """
896
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
897
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
898
+ beam_idx at every generation step.
899
+
900
+ Output shares the same memory storage as `past`.
901
+ """
902
+ standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx))
903
+
904
+ # Get a copy of `beam_idx` on all the devices where we need those indices.
905
+ device_to_beam_idx = {
906
+ past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
907
+ }
908
+ reordered_past = tuple(
909
+ (
910
+ layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
911
+ layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
912
+ )
913
+ for layer_past in standardized_past
914
+ )
915
+ return self._convert_to_bloom_cache(reordered_past)
916
+
917
+
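The cache handling above also drives plain incremental decoding. A minimal two-step sketch (same assumed example checkpoint as before): the first pass consumes the whole prompt, the second pass feeds only the new token together with the returned `past_key_values`.

import torch
from transformers import AutoTokenizer, BloomForCausalLM

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = BloomForCausalLM.from_pretrained("bigscience/bloom-560m").eval()

input_ids = tokenizer("The capital of France is", return_tensors="pt").input_ids
with torch.no_grad():
    out = model(input_ids, use_cache=True)                       # full prompt, cache in Bloom layout
    next_id = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)  # greedy pick of the next token
    out = model(next_id, past_key_values=out.past_key_values, use_cache=True)  # one token + cache

print(tokenizer.decode(next_id[0]))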
918
+ @add_start_docstrings(
919
+ """
920
+ The Bloom Model transformer with a sequence classification head on top (linear layer).
921
+
922
+ [`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models
923
+ (e.g. GPT-1) do.
924
+
925
+ Since it does classification on the last token, it needs to know the position of the last token. If a
927
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
928
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
929
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
930
+ each row of the batch).
930
+ """,
931
+ BLOOM_START_DOCSTRING,
932
+ )
933
+ class BloomForSequenceClassification(BloomPreTrainedModel):
934
+ def __init__(self, config: BloomConfig):
935
+ super().__init__(config)
936
+ self.num_labels = config.num_labels
937
+ self.transformer = BloomModel(config)
938
+ self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
939
+
940
+ # Initialize weights and apply final processing
941
+ self.post_init()
942
+
943
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
944
+ @add_code_sample_docstrings(
945
+ checkpoint=_CHECKPOINT_FOR_DOC,
946
+ output_type=SequenceClassifierOutputWithPast,
947
+ config_class=_CONFIG_FOR_DOC,
948
+ )
949
+ def forward(
950
+ self,
951
+ input_ids: Optional[torch.LongTensor] = None,
952
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
953
+ attention_mask: Optional[torch.Tensor] = None,
954
+ head_mask: Optional[torch.Tensor] = None,
955
+ inputs_embeds: Optional[torch.Tensor] = None,
956
+ labels: Optional[torch.Tensor] = None,
957
+ use_cache: Optional[bool] = None,
958
+ output_attentions: Optional[bool] = None,
959
+ output_hidden_states: Optional[bool] = None,
960
+ return_dict: Optional[bool] = None,
961
+ **deprecated_arguments,
962
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
963
+ r"""
964
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
965
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
966
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
967
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
968
+ """
969
+ if deprecated_arguments.pop("position_ids", False) is not False:
970
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
971
+ warnings.warn(
972
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
973
+ " passing `position_ids`.",
974
+ FutureWarning,
975
+ )
976
+ if len(deprecated_arguments) > 0:
977
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
978
+
979
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
980
+
981
+ transformer_outputs = self.transformer(
982
+ input_ids,
983
+ past_key_values=past_key_values,
984
+ attention_mask=attention_mask,
985
+ head_mask=head_mask,
986
+ inputs_embeds=inputs_embeds,
987
+ use_cache=use_cache,
988
+ output_attentions=output_attentions,
989
+ output_hidden_states=output_hidden_states,
990
+ return_dict=return_dict,
991
+ )
992
+
993
+ hidden_states = transformer_outputs[0]
994
+ logits = self.score(hidden_states)
995
+
996
+ if input_ids is not None:
997
+ batch_size = input_ids.shape[0]
998
+ else:
999
+ batch_size = inputs_embeds.shape[0]
1000
+
1001
+ if self.config.pad_token_id is None and batch_size != 1:
1002
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1003
+ if self.config.pad_token_id is None:
1004
+ sequence_lengths = -1
1005
+ else:
1006
+ if input_ids is not None:
1007
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1008
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1009
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1010
+ sequence_lengths = sequence_lengths.to(logits.device)
1011
+ else:
1012
+ sequence_lengths = -1
1013
+ logger.warning(
1014
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1015
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1016
+ )
1017
+
1018
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1019
+
1020
+ loss = None
1021
+ if labels is not None:
1022
+ if self.config.problem_type is None:
1023
+ if self.num_labels == 1:
1024
+ self.config.problem_type = "regression"
1025
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1026
+ self.config.problem_type = "single_label_classification"
1027
+ else:
1028
+ self.config.problem_type = "multi_label_classification"
1029
+
1030
+ if self.config.problem_type == "regression":
1031
+ loss_fct = MSELoss()
1032
+ if self.num_labels == 1:
1033
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1034
+ else:
1035
+ loss = loss_fct(pooled_logits, labels)
1036
+ elif self.config.problem_type == "single_label_classification":
1037
+ loss_fct = CrossEntropyLoss()
1038
+ loss = loss_fct(pooled_logits, labels)
1039
+ elif self.config.problem_type == "multi_label_classification":
1040
+ loss_fct = BCEWithLogitsLoss()
1041
+ loss = loss_fct(pooled_logits, labels)
1042
+ if not return_dict:
1043
+ output = (pooled_logits,) + transformer_outputs[1:]
1044
+ return ((loss,) + output) if loss is not None else output
1045
+
1046
+ return SequenceClassifierOutputWithPast(
1047
+ loss=loss,
1048
+ logits=pooled_logits,
1049
+ past_key_values=transformer_outputs.past_key_values,
1050
+ hidden_states=transformer_outputs.hidden_states,
1051
+ attentions=transformer_outputs.attentions,
1052
+ )
1053
+
1054
+
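A small worked example of the last-token pooling index computed above (dummy ids and a hypothetical pad id, not taken from the diff): `argmax` returns 0 when no pad token is present, and the modulo wraps the resulting -1 around to the last position, which is what makes the expression ONNX-friendly.

import torch

pad_token_id = 3
input_ids = torch.tensor(
    [
        [5, 8, 9, 3, 3],  # right-padded: last real token at index 2
        [5, 8, 9, 7, 6],  # no padding: last token at index 4
    ]
)
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])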
1055
+ @add_start_docstrings(
1056
+ """
1057
+ Bloom Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1058
+ Named-Entity-Recognition (NER) tasks.
1059
+ """,
1060
+ BLOOM_START_DOCSTRING,
1061
+ )
1062
+ class BloomForTokenClassification(BloomPreTrainedModel):
1063
+ def __init__(self, config: BloomConfig):
1064
+ super().__init__(config)
1065
+ self.num_labels = config.num_labels
1066
+
1067
+ self.transformer = BloomModel(config)
1068
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1069
+ classifier_dropout = config.classifier_dropout
1070
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1071
+ classifier_dropout = config.hidden_dropout
1072
+ else:
1073
+ classifier_dropout = 0.1
1074
+ self.dropout = nn.Dropout(classifier_dropout)
1075
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1076
+
1077
+ # Initialize weights and apply final processing
1078
+ self.post_init()
1079
+
1080
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
1081
+ @add_code_sample_docstrings(
1082
+ checkpoint=_CHECKPOINT_FOR_DOC,
1083
+ output_type=TokenClassifierOutput,
1084
+ config_class=_CONFIG_FOR_DOC,
1085
+ )
1086
+ def forward(
1087
+ self,
1088
+ input_ids: Optional[torch.LongTensor] = None,
1089
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1090
+ attention_mask: Optional[torch.Tensor] = None,
1091
+ head_mask: Optional[torch.Tensor] = None,
1092
+ inputs_embeds: Optional[torch.Tensor] = None,
1093
+ labels: Optional[torch.Tensor] = None,
1094
+ use_cache: Optional[bool] = None,
1095
+ output_attentions: Optional[bool] = None,
1096
+ output_hidden_states: Optional[bool] = None,
1097
+ return_dict: Optional[bool] = None,
1098
+ **deprecated_arguments,
1099
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1100
+ r"""
1101
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1102
+ Labels for computing the token classification loss. Indices should be in `[0, ...,
1103
+ config.num_labels - 1]`. A classification loss (Cross-Entropy) is computed over all tokens in the
1104
+ sequence.
1105
+ """
1106
+ if deprecated_arguments.pop("position_ids", False) is not False:
1107
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
1108
+ warnings.warn(
1109
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
1110
+ " passing `position_ids`.",
1111
+ FutureWarning,
1112
+ )
1113
+ if len(deprecated_arguments) > 0:
1114
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
1115
+
1116
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1117
+
1118
+ transformer_outputs = self.transformer(
1119
+ input_ids,
1120
+ past_key_values=past_key_values,
1121
+ attention_mask=attention_mask,
1122
+ head_mask=head_mask,
1123
+ inputs_embeds=inputs_embeds,
1124
+ use_cache=use_cache,
1125
+ output_attentions=output_attentions,
1126
+ output_hidden_states=output_hidden_states,
1127
+ return_dict=return_dict,
1128
+ )
1129
+
1130
+ hidden_states = transformer_outputs[0]
1131
+ hidden_states = self.dropout(hidden_states)
1132
+ logits = self.classifier(hidden_states)
1133
+
1134
+ loss = None
1135
+ if labels is not None:
1136
+ # move labels to correct device to enable model parallelism
1137
+ labels = labels.to(logits.device)
1138
+ batch_size, seq_length = labels.shape
1139
+ loss_fct = CrossEntropyLoss()
1140
+ loss = loss_fct(
1141
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1142
+ )
1143
+
1144
+ if not return_dict:
1145
+ output = (logits,) + transformer_outputs[2:]
1146
+ return ((loss,) + output) if loss is not None else output
1147
+
1148
+ return TokenClassifierOutput(
1149
+ loss=loss,
1150
+ logits=logits,
1151
+ hidden_states=transformer_outputs.hidden_states,
1152
+ attentions=transformer_outputs.attentions,
1153
+ )
1154
+
1155
+
1156
+ @add_start_docstrings(
1157
+ """
1158
+ The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like
1159
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1160
+ """,
1161
+ BLOOM_START_DOCSTRING,
1162
+ )
1163
+ class BloomForQuestionAnswering(BloomPreTrainedModel):
1164
+ def __init__(self, config):
1165
+ super().__init__(config)
1166
+ self.transformer = BloomModel(config)
1167
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1168
+
1169
+ # Initialize weights and apply final processing
1170
+ self.post_init()
1171
+
1172
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1173
+ def forward(
1174
+ self,
1175
+ input_ids: Optional[torch.LongTensor] = None,
1176
+ attention_mask: Optional[torch.FloatTensor] = None,
1177
+ position_ids: Optional[torch.LongTensor] = None,
1178
+ head_mask: Optional[torch.FloatTensor] = None,
1179
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1180
+ start_positions: Optional[torch.LongTensor] = None,
1181
+ end_positions: Optional[torch.LongTensor] = None,
1182
+ output_attentions: Optional[bool] = None,
1183
+ output_hidden_states: Optional[bool] = None,
1184
+ return_dict: Optional[bool] = None,
1185
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1186
+ r"""
1187
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1188
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1189
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1190
+ are not taken into account for computing the loss.
1191
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1192
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1193
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1194
+ are not taken into account for computing the loss.
1195
+ """
1196
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1197
+
1198
+ outputs = self.transformer(
1199
+ input_ids,
1200
+ attention_mask=attention_mask,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ output_attentions=output_attentions,
1205
+ output_hidden_states=output_hidden_states,
1206
+ return_dict=return_dict,
1207
+ )
1208
+
1209
+ sequence_output = outputs[0]
1210
+
1211
+ logits = self.qa_outputs(sequence_output)
1212
+ start_logits, end_logits = logits.split(1, dim=-1)
1213
+ start_logits = start_logits.squeeze(-1).contiguous()
1214
+ end_logits = end_logits.squeeze(-1).contiguous()
1215
+
1216
+ total_loss = None
1217
+ if start_positions is not None and end_positions is not None:
1218
+ # If we are on multi-GPU, splitting adds a dimension, so squeeze it away
1219
+ if len(start_positions.size()) > 1:
1220
+ start_positions = start_positions.squeeze(-1)
1221
+ if len(end_positions.size()) > 1:
1222
+ end_positions = end_positions.squeeze(-1)
1223
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1224
+ ignored_index = start_logits.size(1)
1225
+ start_positions = start_positions.clamp(0, ignored_index)
1226
+ end_positions = end_positions.clamp(0, ignored_index)
1227
+
1228
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1229
+ start_loss = loss_fct(start_logits, start_positions)
1230
+ end_loss = loss_fct(end_logits, end_positions)
1231
+ total_loss = (start_loss + end_loss) / 2
1232
+
1233
+ if not return_dict:
1234
+ output = (start_logits, end_logits) + outputs[2:]
1235
+ return ((total_loss,) + output) if total_loss is not None else output
1236
+
1237
+ return QuestionAnsweringModelOutput(
1238
+ loss=total_loss,
1239
+ start_logits=start_logits,
1240
+ end_logits=end_logits,
1241
+ hidden_states=outputs.hidden_states,
1242
+ attentions=outputs.attentions,
1243
+ )
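For completeness, a sketch of how the start/end logits produced by the QA head above are usually decoded into a span (greedy argmax; real pipelines additionally discard spans where the end precedes the start). The logits here are made up:

import torch

start_logits = torch.tensor([[0.1, 2.5, 0.3, 0.2, 0.1, 0.0]])  # batch of 1, sequence length 6
end_logits = torch.tensor([[0.0, 0.1, 0.4, 3.0, 0.2, 0.1]])

start_index = start_logits.argmax(dim=-1)  # tensor([1])
end_index = end_logits.argmax(dim=-1)      # tensor([3])
print(start_index.item(), end_index.item())  # answer spans token positions 1..3 inclusive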
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/modeling_flax_bloom.py ADDED
@@ -0,0 +1,734 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. Team and Bigscience Workshop. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Flax BLOOM model."""
16
+
17
+ import math
18
+ from functools import partial
19
+ from typing import Optional, Tuple
20
+
21
+ import flax.linen as nn
22
+ import jax
23
+ import jax.numpy as jnp
24
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
25
+ from flax.linen import combine_masks, dot_product_attention_weights, make_causal_mask
26
+ from flax.linen.activation import tanh
27
+ from flax.traverse_util import flatten_dict, unflatten_dict
28
+ from jax import lax
29
+
30
+ from ...modeling_flax_outputs import (
31
+ FlaxBaseModelOutput,
32
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
33
+ FlaxCausalLMOutput,
34
+ )
35
+ from ...modeling_flax_utils import FlaxPreTrainedModel, append_call_sample_docstring
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_bloom import BloomConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "bigscience/bloom"
43
+ _CONFIG_FOR_DOC = "BloomConfig"
44
+
45
+
46
+ BLOOM_START_DOCSTRING = r"""
47
+
48
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
50
+ etc.)
51
+
52
+ This model is also a Flax Linen
53
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
54
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
55
+
56
+ Finally, this model supports inherent JAX features such as:
57
+
58
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
59
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
60
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
61
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
62
+
63
+ Parameters:
64
+ config ([`BloomConfig`]): Model configuration class with all the parameters of the model.
65
+ Initializing with a config file does not load the weights associated with the model, only the
66
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
67
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
68
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
69
+ `jax.numpy.bfloat16` (on TPUs).
70
+
71
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
72
+ specified all the computation will be performed with the given `dtype`.
73
+
74
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
75
+ parameters.**
76
+
77
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
78
+ [`~FlaxPreTrainedModel.to_bf16`].
79
+ """
80
+
81
+ BLOOM_INPUTS_DOCSTRING = r"""
82
+ Args:
83
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
84
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
85
+
86
+ Indices can be obtained using [`BloomTokenizer`]. See [`PreTrainedTokenizer.encode`] and
87
+ [`PreTrainedTokenizer.__call__`] for details.
88
+
89
+ [What are input IDs?](../glossary#input-ids)
90
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
91
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
92
+
93
+ - 1 for tokens that are **not masked**,
94
+ - 0 for tokens that are **masked**.
95
+
96
+ [What are attention masks?](../glossary#attention-mask)
97
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
98
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
99
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
100
+ output_attentions (`bool`, *optional*):
101
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
102
+ tensors for more detail.
103
+ output_hidden_states (`bool`, *optional*):
104
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
105
+ more detail.
106
+ return_dict (`bool`, *optional*):
107
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
108
+ """
109
+
110
+
111
+ def build_alibi_tensor(attention_mask: jnp.ndarray, num_heads: int, dtype: Optional[jnp.dtype] = jnp.float32):
112
+ """
113
+ Flax implementation of the BLOOM ALiBi tensor. The BLOOM ALiBi tensor is not causal, unlike in the original paper; it
114
+ relies on the translation invariance of softmax for a quick implementation: for a tensor `l` and a fixed value `a`,
115
+ `softmax(l + a) = softmax(l)`. Based on
116
+ https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
117
+ Link to paper: https://arxiv.org/abs/2108.12409
118
+
119
+ Args:
120
+ attention_mask (`jnp.ndarray`):
121
+ Token-wise attention mask, this should be of shape `(batch_size, max_seq_len)`.
122
+ num_heads (`int`):
123
+ Number of attention heads.
124
+ dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
125
+ The data type (dtype) of the output tensor.
126
+
127
+ Returns: Alibi tensor of shape `(batch_size * num_heads, 1, max_seq_len)`.
128
+ """
129
+ batch_size, seq_length = attention_mask.shape
130
+ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
131
+ base = jnp.array(2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), dtype=jnp.float32)
132
+ powers = jnp.arange(1, 1 + closest_power_of_2, dtype=jnp.float32)
133
+ slopes = jax.lax.pow(base, powers)
134
+
135
+ if closest_power_of_2 != num_heads:
136
+ extra_base = jnp.array(2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), dtype=jnp.float32)
137
+ num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
138
+ extra_powers = jnp.arange(1, 1 + 2 * num_remaining_heads, 2, dtype=jnp.float32)
139
+ slopes = jnp.concatenate([slopes, jax.lax.pow(extra_base, extra_powers)], axis=0)
140
+
141
+ # Note: the Alibi tensor will be added to the attention bias that is applied to the query-key product of the attention
142
+ # therefore, Alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
143
+ # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
144
+ # so that the query_length dimension will then be broadcast correctly.
145
+ # This is more or less identical to T5's relative position bias:
146
+ # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
147
+ arange_tensor = ((attention_mask.cumsum(axis=-1) - 1) * attention_mask)[:, None, :]
148
+ alibi = slopes[..., None] * arange_tensor
149
+ alibi = jnp.expand_dims(alibi, axis=2)
150
+ return jnp.asarray(alibi, dtype)
151
+
152
+
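A worked example of the slope and position-offset computation above (illustrative values only): for 8 heads the slopes form the geometric sequence 1/2, 1/4, ..., 1/256 from the ALiBi paper, and the offsets are the cumulative token positions with padded positions zeroed out.

import math
import jax
import jax.numpy as jnp

num_heads = 8
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))                                 # 8
base = jnp.array(2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), dtype=jnp.float32)   # 0.5
powers = jnp.arange(1, 1 + closest_power_of_2, dtype=jnp.float32)
print(jax.lax.pow(base, powers))  # [0.5, 0.25, ..., 0.00390625]: one slope per head

attention_mask = jnp.array([[0, 1, 1, 1]])                    # one padded position on the left
print((attention_mask.cumsum(axis=-1) - 1) * attention_mask)  # [[0, 0, 1, 2]]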
153
+ class FlaxBloomAttention(nn.Module):
154
+ config: BloomConfig
155
+ dtype: jnp.dtype = jnp.float32
156
+
157
+ def setup(self):
158
+ self.hidden_size = self.config.hidden_size
159
+ self.num_heads = self.config.n_head
160
+ self.head_dim = self.hidden_size // self.num_heads
161
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
162
+
163
+ if self.head_dim * self.num_heads != self.hidden_size:
164
+ raise ValueError(
165
+ f"`hidden_size` must be divisible by `num_heads` (got `hidden_size`: {self.hidden_size} and "
166
+ f"`num_heads`: {self.num_heads})."
167
+ )
168
+
169
+ dense = partial(
170
+ nn.Dense,
171
+ dtype=self.dtype,
172
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
173
+ )
174
+
175
+ self.query_key_value = dense(self.hidden_size * 3)
176
+ self.dense = dense(self.hidden_size)
177
+ self.resid_dropout = nn.Dropout(rate=self.config.hidden_dropout)
178
+
179
+ def _split_heads(self, hidden_states):
180
+ return hidden_states.reshape(hidden_states.shape[:-1] + (self.num_heads, self.head_dim * 3))
181
+
182
+ def _merge_heads(self, hidden_states):
183
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,))
184
+
185
+ @nn.compact
186
+ # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJAttention._concatenate_to_cache
187
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
188
+ """
189
+ This function takes projected key, value states from a single input token and concatenates the states to cached
190
+ states from previous steps. This function is slightly adapted from the official Flax repository:
191
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
192
+ """
193
+ # detect if we're initializing by absence of existing cache data.
194
+ is_initialized = self.has_variable("cache", "cached_key")
195
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
196
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
197
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
198
+
199
+ if is_initialized:
200
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
201
+ # update key, value caches with our new 1d spatial slices
202
+ cur_index = cache_index.value
203
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
204
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
205
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
206
+ cached_key.value = key
207
+ cached_value.value = value
208
+ num_updated_cache_vectors = query.shape[1]
209
+ cache_index.value = cache_index.value + num_updated_cache_vectors
210
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key
211
+ # positions that have already been generated and cached, not the remaining zero elements.
212
+ pad_mask = jnp.broadcast_to(
213
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
214
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
215
+ )
216
+ attention_mask = combine_masks(pad_mask, attention_mask)
217
+ return key, value, attention_mask
218
+
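A toy illustration of the `lax.dynamic_update_slice` call used above (shapes invented for brevity; the real cache additionally tracks the index and builds the pad mask): the freshly projected key for the current token is written into the pre-allocated buffer at the current cache index.

import jax.numpy as jnp
from jax import lax

cached = jnp.zeros((1, 4, 1, 2))  # [batch, max_length, num_heads, head_dim]
new_key = jnp.ones((1, 1, 1, 2))  # one projected key for the current token
cur_index = 2
updated = lax.dynamic_update_slice(cached, new_key, (0, cur_index, 0, 0))
print(updated[0, :, 0, 0])        # [0. 0. 1. 0.] -- slot 2 is now filled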
219
+ def __call__(
220
+ self,
221
+ hidden_states,
222
+ residual,
223
+ alibi,
224
+ attention_mask=None,
225
+ deterministic: bool = True,
226
+ init_cache: bool = False,
227
+ output_attentions: bool = False,
228
+ ):
229
+ batch_size, seq_length = hidden_states.shape[:2]
230
+
231
+ # proj q, k, v
232
+ fused_qkv = self.query_key_value(hidden_states)
233
+ fused_qkv = self._split_heads(fused_qkv)
234
+ query, key, value = jnp.split(fused_qkv, 3, axis=-1)
235
+
236
+ causal_attention_mask = make_causal_mask(attention_mask, dtype="bool")
237
+
238
+ # for fast decoding causal attention mask should be shifted
239
+ causal_attention_mask_shift = (
240
+ self.variables["cache"]["cache_index"] if self.has_variable("cache", "cached_key") else 0
241
+ )
242
+
243
+ # fast decoding for generate requires special attention_mask
244
+ if self.has_variable("cache", "cached_key"):
245
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
246
+ causal_attention_mask = jax.lax.dynamic_slice(
247
+ causal_attention_mask,
248
+ (0, 0, causal_attention_mask_shift, 0),
249
+ (1, 1, seq_length, max_decoder_length),
250
+ )
251
+
252
+ # broadcast causal attention mask & attention mask to fit for merge
253
+ causal_attention_mask = jnp.broadcast_to(
254
+ causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:]
255
+ )
256
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape)
257
+ attention_mask = combine_masks(attention_mask, causal_attention_mask)
258
+
259
+ dropout_rng = None
260
+ if not deterministic and self.config.attention_dropout > 0.0:
261
+ dropout_rng = self.make_rng("dropout")
262
+
263
+ # During fast autoregressive decoding, we feed one position at a time,
264
+ # and cache the keys and values step by step.
265
+ if self.has_variable("cache", "cached_key") or init_cache:
266
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
267
+
268
+ # transform boolean mask into float mask
269
+ mask_value = jnp.finfo(self.dtype).min
270
+ attention_bias = lax.select(
271
+ attention_mask > 0,
272
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
273
+ jnp.full(attention_mask.shape, mask_value).astype(self.dtype),
274
+ )
275
+
276
+ attention_bias = attention_bias + alibi
277
+
278
+ # Cast in fp32 if the original dtype is different from fp32
279
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
280
+
281
+ attn_weights = dot_product_attention_weights(
282
+ query,
283
+ key,
284
+ bias=attention_bias,
285
+ dropout_rng=dropout_rng,
286
+ dropout_rate=self.config.attention_dropout,
287
+ deterministic=deterministic,
288
+ dtype=attention_dtype,
289
+ )
290
+
291
+ # Cast back in the original dtype if the native dtype is not fp32
292
+ if self.attention_softmax_in_fp32:
293
+ attn_weights = attn_weights.astype(self.dtype)
294
+
295
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
296
+ attn_output = self._merge_heads(attn_output)
297
+ attn_output = self.dense(attn_output)
298
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
299
+
300
+ attn_output = attn_output + residual
301
+
302
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
303
+ return outputs
304
+
305
+
306
+ class BloomGELU(nn.Module):
307
+ def setup(self):
308
+ self.dtype = jnp.float32
309
+
310
+ def __call__(self, x):
311
+ return x * 0.5 * (1.0 + tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
312
+
313
+
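The hard-coded 0.79788456 above is sqrt(2/pi); as a quick sanity check (illustrative only, not part of the diff), the expression agrees with JAX's built-in tanh-approximate GELU up to float rounding:

import jax
import jax.numpy as jnp

x = jnp.linspace(-3.0, 3.0, 7)
bloom_gelu = x * 0.5 * (1.0 + jnp.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
reference = jax.nn.gelu(x, approximate=True)
print(jnp.max(jnp.abs(bloom_gelu - reference)))  # tiny difference, from the truncated sqrt(2/pi) constant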
314
+ class FlaxBloomMLP(nn.Module):
315
+ config: BloomConfig
316
+ dtype: jnp.dtype = jnp.float32
317
+
318
+ def setup(self):
319
+ hidden_size = self.config.hidden_size
320
+
321
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
322
+
323
+ self.dense_h_to_4h = nn.Dense(4 * hidden_size, dtype=self.dtype, kernel_init=kernel_init)
324
+ self.dense_4h_to_h = nn.Dense(hidden_size, dtype=self.dtype, kernel_init=kernel_init)
325
+ self.hidden_dropout = nn.Dropout(self.config.hidden_dropout)
326
+ self.act = BloomGELU()
327
+
328
+ def __call__(self, hidden_states, residual, deterministic: bool = True):
329
+ hidden_states = self.dense_h_to_4h(hidden_states)
330
+ hidden_states = self.act(hidden_states)
331
+
332
+ intermediate_output = self.dense_4h_to_h(hidden_states)
333
+
334
+ intermediate_output = intermediate_output + residual
335
+ hidden_states = self.hidden_dropout(intermediate_output, deterministic=deterministic)
336
+
337
+ return hidden_states
338
+
339
+
340
+ class FlaxBloomBlock(nn.Module):
341
+ config: BloomConfig
342
+ dtype: jnp.dtype = jnp.float32
343
+
344
+ def setup(self):
345
+ self.input_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
346
+
347
+ self.self_attention = FlaxBloomAttention(self.config, dtype=self.dtype)
348
+ self.post_attention_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
349
+
350
+ self.mlp = FlaxBloomMLP(self.config, dtype=self.dtype)
351
+
352
+ self.apply_residual_connection_post_layernorm = self.config.apply_residual_connection_post_layernorm
353
+ self.hidden_dropout = self.config.hidden_dropout
354
+
355
+ def __call__(
356
+ self,
357
+ hidden_states,
358
+ alibi,
359
+ attention_mask=None,
360
+ deterministic: bool = True,
361
+ init_cache: bool = False,
362
+ output_attentions: bool = False,
363
+ ):
364
+ layernorm_output = self.input_layernorm(hidden_states)
365
+
366
+ # layer norm before saving residual if config calls for it
367
+ if self.apply_residual_connection_post_layernorm:
368
+ residual = layernorm_output
369
+ else:
370
+ residual = hidden_states
371
+
372
+ # self-attention
373
+ attn_outputs = self.self_attention(
374
+ layernorm_output,
375
+ residual=residual,
376
+ alibi=alibi,
377
+ attention_mask=attention_mask,
378
+ deterministic=deterministic,
379
+ init_cache=init_cache,
380
+ output_attentions=output_attentions,
381
+ )
382
+
383
+ attention_output = attn_outputs[0]
384
+
385
+ outputs = attn_outputs[1:]
386
+
387
+ post_layernorm = self.post_attention_layernorm(attention_output)
388
+
389
+ # set residual based on config
390
+ if self.apply_residual_connection_post_layernorm:
391
+ residual = post_layernorm
392
+ else:
393
+ residual = attention_output
394
+
395
+ output = self.mlp(post_layernorm, residual, deterministic=deterministic)
396
+
397
+ outputs = (output,) + outputs
398
+
399
+ return outputs
400
+
401
+
402
+ class FlaxBloomPreTrainedModel(FlaxPreTrainedModel):
403
+ """
404
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
405
+ models.
406
+ """
407
+
408
+ config_class = BloomConfig
409
+ base_model_prefix = "transformer"
410
+ module_class: nn.Module = None
411
+
412
+ def __init__(
413
+ self,
414
+ config: BloomConfig,
415
+ input_shape: Tuple = (1, 1),
416
+ seed: int = 0,
417
+ dtype: jnp.dtype = jnp.float32,
418
+ _do_init: bool = True,
419
+ **kwargs,
420
+ ):
421
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
422
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
423
+
424
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
425
+ # init input tensors
426
+ input_ids = jnp.zeros(input_shape, dtype="i4")
427
+ attention_mask = jnp.ones_like(input_ids)
428
+ params_rng, dropout_rng = jax.random.split(rng)
429
+ rngs = {"params": params_rng, "dropout": dropout_rng}
430
+
431
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
432
+
433
+ if params is not None:
434
+ random_params = flatten_dict(unfreeze(random_params))
435
+ params = flatten_dict(unfreeze(params))
436
+ for missing_key in self._missing_keys:
437
+ params[missing_key] = random_params[missing_key]
438
+ self._missing_keys = set()
439
+ return freeze(unflatten_dict(params))
440
+ else:
441
+ return random_params
442
+
443
+ def init_cache(self, batch_size, max_length):
444
+ r"""
445
+ Args:
446
+ batch_size (`int`):
447
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
448
+ max_length (`int`):
449
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
450
+ cache.
451
+ """
452
+ # init input variables to retrieve cache
453
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
454
+ attention_mask = jnp.ones_like(input_ids)
455
+
456
+ init_variables = self.module.init(
457
+ jax.random.PRNGKey(0), input_ids, attention_mask, return_dict=False, init_cache=True
458
+ )
459
+ return unfreeze(init_variables["cache"])
460
+
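A minimal sketch of how `init_cache` is typically used (a tiny randomly initialised config so no pretrained checkpoint is involved; the config values are invented):

from transformers import BloomConfig, FlaxBloomForCausalLM

config = BloomConfig(vocab_size=256, hidden_size=64, n_layer=2, n_head=4)
model = FlaxBloomForCausalLM(config)

past_key_values = model.init_cache(batch_size=1, max_length=16)
# a nested dict of zero-initialised cached_key / cached_value buffers plus a cache_index per
# attention layer; passing it back to the model via `past_key_values` enables fast decoding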
461
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
462
+ def __call__(
463
+ self,
464
+ input_ids,
465
+ attention_mask=None,
466
+ past_key_values: dict = None,
467
+ params: dict = None,
468
+ dropout_rng: jax.random.PRNGKey = None,
469
+ train: bool = False,
470
+ output_attentions: Optional[bool] = None,
471
+ output_hidden_states: Optional[bool] = None,
472
+ return_dict: Optional[bool] = None,
473
+ ):
474
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
475
+ output_hidden_states = (
476
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
477
+ )
478
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
479
+
480
+ batch_size, sequence_length = input_ids.shape
481
+
482
+ if attention_mask is None:
483
+ attention_mask = jnp.ones((batch_size, sequence_length))
484
+
485
+ # Handle any PRNG if needed
486
+ rngs = {}
487
+ if dropout_rng is not None:
488
+ rngs["dropout"] = dropout_rng
489
+
490
+ inputs = {"params": params or self.params}
491
+
492
+ # If past_key_values are passed, the cache is already initialized, so the private flag init_cache has to be
493
+ # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
494
+ # changed by the FlaxBloomAttention module.
495
+ if past_key_values:
496
+ inputs["cache"] = past_key_values
497
+ mutable = ["cache"]
498
+ else:
499
+ mutable = False
500
+
501
+ outputs = self.module.apply(
502
+ inputs,
503
+ jnp.array(input_ids, dtype="i4"),
504
+ jnp.array(attention_mask, dtype="i4"),
505
+ not train,
506
+ False,
507
+ output_attentions,
508
+ output_hidden_states,
509
+ return_dict,
510
+ rngs=rngs,
511
+ mutable=mutable,
512
+ )
513
+
514
+ # add updated cache to model output
515
+ if past_key_values is not None and return_dict:
516
+ outputs, past_key_values = outputs
517
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
518
+ return outputs
519
+ elif past_key_values is not None and not return_dict:
520
+ outputs, past_key_values = outputs
521
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
522
+
523
+ return outputs
524
+
525
+
526
+ class FlaxBloomBlockCollection(nn.Module):
527
+ config: BloomConfig
528
+ dtype: jnp.dtype = jnp.float32
529
+
530
+ def setup(self):
531
+ self.layers = [
532
+ FlaxBloomBlock(self.config, name=str(layer_number), dtype=self.dtype)
533
+ for layer_number in range(self.config.num_hidden_layers)
534
+ ]
535
+
536
+ def __call__(
537
+ self,
538
+ hidden_states,
539
+ alibi,
540
+ attention_mask=None,
541
+ deterministic: bool = True,
542
+ init_cache: bool = False,
543
+ output_attentions: bool = False,
544
+ output_hidden_states: bool = False,
545
+ ):
546
+ all_attentions = () if output_attentions else None
547
+ all_hidden_states = () if output_hidden_states else None
548
+
549
+ for layer_number in range(self.config.num_hidden_layers):
550
+ if output_hidden_states:
551
+ all_hidden_states += (hidden_states,)
552
+
553
+ layer_outputs = self.layers[layer_number](
554
+ hidden_states,
555
+ alibi=alibi,
556
+ attention_mask=attention_mask,
557
+ deterministic=deterministic,
558
+ init_cache=init_cache,
559
+ output_attentions=output_attentions,
560
+ )
561
+ hidden_states = layer_outputs[0]
562
+
563
+ if output_attentions:
564
+ all_attentions += (layer_outputs[1],)
565
+
566
+ # this contains possible `None` values - `FlaxBloomModule` will filter them out
567
+ outputs = (hidden_states, all_hidden_states, all_attentions)
568
+
569
+ return outputs
570
+
571
+
572
+ class FlaxBloomModule(nn.Module):
573
+ config: BloomConfig
574
+ dtype: jnp.dtype = jnp.float32
575
+
576
+ def setup(self):
577
+ self.embed_dim = self.config.hidden_size
578
+
579
+ # word embeddings (no positional embedding layer)
580
+ self.word_embeddings = nn.Embed(
581
+ self.config.vocab_size,
582
+ self.embed_dim,
583
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
584
+ dtype=self.dtype,
585
+ )
586
+
587
+ # post-embedding layernorm
588
+ self.word_embeddings_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
589
+
590
+ # transformer layers
591
+ self.h = FlaxBloomBlockCollection(self.config, dtype=self.dtype)
592
+
593
+ # final layernorm
594
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
595
+
596
+ def __call__(
597
+ self,
598
+ input_ids=None,
599
+ attention_mask=None,
600
+ deterministic=True,
601
+ init_cache: bool = False,
602
+ output_attentions: bool = False,
603
+ output_hidden_states: bool = False,
604
+ return_dict: bool = True,
605
+ ):
606
+ inputs_embeds = self.word_embeddings(input_ids)
607
+ # do post-embedding layernorm
608
+ hidden_states = self.word_embeddings_layernorm(inputs_embeds)
609
+
610
+ # build alibi depending on `attention_mask`
611
+ alibi = build_alibi_tensor(attention_mask, self.config.n_head, dtype=hidden_states.dtype)
612
+
613
+ outputs = self.h(
614
+ hidden_states,
615
+ alibi=alibi,
616
+ attention_mask=attention_mask,
617
+ deterministic=deterministic,
618
+ init_cache=init_cache,
619
+ output_hidden_states=output_hidden_states,
620
+ output_attentions=output_attentions,
621
+ )
622
+
623
+ hidden_states = outputs[0]
624
+ hidden_states = self.ln_f(hidden_states)
625
+
626
+ if output_hidden_states:
627
+ all_hidden_states = outputs[1] + (hidden_states,)
628
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
629
+ else:
630
+ outputs = (hidden_states,) + outputs[1:]
631
+
632
+ if not return_dict:
633
+ return tuple(v for v in [outputs[0], outputs[-1]] if v is not None)
634
+
635
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
636
+ last_hidden_state=hidden_states,
637
+ hidden_states=outputs[1],
638
+ attentions=outputs[-1],
639
+ )
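For orientation, here is a hedged numeric sketch of the ALiBi bias that `build_alibi_tensor` supplies to the block collection above. The shapes and slope formula are illustrative (power-of-two head count); the real helper also folds in the batch dimension and derives positions from the attention mask.

```python
# Hedged sketch of ALiBi: each head gets a geometric slope applied to key positions.
# Toy shapes only; build_alibi_tensor derives positions from the attention mask instead.
import jax.numpy as jnp

num_heads, seq_len = 4, 6
slopes = jnp.array([2.0 ** (-8.0 * (i + 1) / num_heads) for i in range(num_heads)])
positions = jnp.arange(seq_len)
alibi = slopes[:, None, None] * positions[None, None, :]  # (num_heads, 1, seq_len)
print(alibi.shape)
```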
640
+
641
+
642
+ @add_start_docstrings(
643
+ "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.",
644
+ BLOOM_START_DOCSTRING,
645
+ )
646
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoModel with GPTNeo->Bloom
647
+ class FlaxBloomModel(FlaxBloomPreTrainedModel):
648
+ module_class = FlaxBloomModule
649
+
650
+
651
+ append_call_sample_docstring(FlaxBloomModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
652
+
653
+
654
+ class FlaxBloomForCausalLMModule(nn.Module):
655
+ config: BloomConfig
656
+ dtype: jnp.dtype = jnp.float32
657
+
658
+ def setup(self):
659
+ self.transformer = FlaxBloomModule(self.config, dtype=self.dtype)
660
+ self.lm_head = nn.Dense(
661
+ self.config.vocab_size,
662
+ use_bias=False,
663
+ dtype=self.dtype,
664
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
665
+ )
666
+
667
+ def __call__(
668
+ self,
669
+ input_ids,
670
+ attention_mask,
671
+ deterministic: bool = True,
672
+ init_cache: bool = False,
673
+ output_attentions: bool = False,
674
+ output_hidden_states: bool = False,
675
+ return_dict: bool = True,
676
+ ):
677
+ outputs = self.transformer(
678
+ input_ids,
679
+ attention_mask=attention_mask,
680
+ deterministic=deterministic,
681
+ init_cache=init_cache,
682
+ output_attentions=output_attentions,
683
+ output_hidden_states=output_hidden_states,
684
+ return_dict=return_dict,
685
+ )
686
+
687
+ hidden_states = outputs[0]
688
+
689
+ if self.config.tie_word_embeddings:
690
+ shared_kernel = self.transformer.variables["params"]["word_embeddings"]["embedding"].T
691
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
692
+ else:
693
+ lm_logits = self.lm_head(hidden_states)
694
+
695
+ if not return_dict:
696
+ return (lm_logits,) + outputs[1:]
697
+
698
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
699
+
700
+
701
+ @add_start_docstrings(
702
+ """
703
+ The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input
704
+ embeddings).
705
+ """,
706
+ BLOOM_START_DOCSTRING,
707
+ )
708
+ class FlaxBloomForCausalLM(FlaxBloomPreTrainedModel):
709
+ module_class = FlaxBloomForCausalLMModule
710
+
711
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
712
+ # initializing the cache
713
+ batch_size, seq_length = input_ids.shape
714
+
715
+ past_key_values = self.init_cache(batch_size, max_length)
716
+ # Note that usually one would have to put 0's in the attention_mask for
717
+ # x > input_ids.shape[-1] and x < cache_length. But since Bloom uses a causal mask,
718
+ # those positions are masked anyway. Thus, we can create a single static attention_mask here,
719
+ # which is more efficient for compilation
720
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
721
+ if attention_mask is not None:
722
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
723
+
724
+ return {
725
+ "past_key_values": past_key_values,
726
+ "attention_mask": extended_attention_mask,
727
+ }
728
+
729
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
730
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
731
+ return model_kwargs
732
+
733
+
734
+ append_call_sample_docstring(FlaxBloomForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC)
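As a usage note, a minimal sketch of driving the causal LM class defined above through generation. The checkpoint name and generation settings are illustrative assumptions, and a Flax checkpoint (or loading with `from_pt=True`) is required; `generate` ends up calling `prepare_inputs_for_generation`, which pre-allocates the cache up to `max_length` and builds the single static attention mask described in the comments above.

```python
# Minimal sketch with an illustrative checkpoint; greedy decoding for determinism.
from transformers import AutoTokenizer, FlaxBloomForCausalLM

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = FlaxBloomForCausalLM.from_pretrained("bigscience/bloom-560m")  # may need from_pt=True

inputs = tokenizer("The BLOOM model was trained on", return_tensors="np")
outputs = model.generate(**inputs, max_length=32, do_sample=False)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))
```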
llmeval-env/lib/python3.10/site-packages/transformers/models/bloom/tokenization_bloom_fast.py ADDED
@@ -0,0 +1,164 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Bloom."""
16
+
17
+
18
+ import pickle
19
+ from typing import Optional, Tuple
20
+
21
+ from ...tokenization_utils_base import BatchEncoding
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
29
+
30
+
31
+ class BloomTokenizerFast(PreTrainedTokenizerFast):
32
+ """
33
+ Construct a "fast" Bloom tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
34
+ Byte-Pair-Encoding.
35
+
36
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
37
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
38
+
39
+ ```python
40
+ >>> from transformers import BloomTokenizerFast
41
+
42
+ >>> tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")
43
+ >>> tokenizer("Hello world")["input_ids"]
44
+ [59414, 8876]
45
+
46
+ >>> tokenizer(" Hello world")["input_ids"]
47
+ [86153, 8876]
48
+ ```
49
+
50
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
51
+ the model was not pretrained this way, it might yield a decrease in performance.
52
+
53
+ <Tip>
54
+
55
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
56
+
57
+ </Tip>
58
+
59
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
60
+ refer to this superclass for more information regarding those methods.
61
+
62
+ Args:
63
+ vocab_file (`str`):
64
+ Path to the vocabulary file.
65
+ merges_file (`str`):
66
+ Path to the merges file.
67
+ errors (`str`, *optional*, defaults to `"replace"`):
68
+ Paradigm to follow when decoding bytes to UTF-8. See
69
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
70
+ unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
74
+ The beginning of sequence token.
75
+ eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
76
+ The end of sequence token.
77
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
78
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
79
+ other word. (The Bloom tokenizer detects the beginning of words by the preceding space.)
80
+ trim_offsets (`bool`, *optional*, defaults to `True`):
81
+ Whether or not the post-processing step should trim offsets to avoid including whitespaces.
82
+ """
83
+
84
+ vocab_files_names = VOCAB_FILES_NAMES
85
+ model_input_names = ["input_ids", "attention_mask"]
86
+ slow_tokenizer_class = None
87
+ # No `max_model_input_sizes` as BLOOM uses ALiBi positional embeddings
88
+
89
+ def __init__(
90
+ self,
91
+ vocab_file=None,
92
+ merges_file=None,
93
+ tokenizer_file=None,
94
+ unk_token="<unk>",
95
+ bos_token="<s>",
96
+ eos_token="</s>",
97
+ pad_token="<pad>",
98
+ add_prefix_space=False,
99
+ clean_up_tokenization_spaces=False,
100
+ **kwargs,
101
+ ):
102
+ super().__init__(
103
+ vocab_file,
104
+ merges_file,
105
+ tokenizer_file=tokenizer_file,
106
+ unk_token=unk_token,
107
+ bos_token=bos_token,
108
+ eos_token=eos_token,
109
+ pad_token=pad_token,
110
+ add_prefix_space=add_prefix_space,
111
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
112
+ **kwargs,
113
+ )
114
+ # TODO @ArthurZucker this can only work one way for now, to be updated later on. Tests should also properly
115
+ # check this as they were green before.
116
+ pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer)
117
+ decoder_state = pickle.dumps(self.backend_tokenizer.decoder)
118
+
119
+ if add_prefix_space:
120
+ pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
121
+ decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
122
+ self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state)
123
+ self.backend_tokenizer.decoder = pickle.loads(decoder_state)
124
+
125
+ self.add_prefix_space = add_prefix_space
126
+
127
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
128
+ is_split_into_words = kwargs.get("is_split_into_words", False)
129
+ if not (self.add_prefix_space or not is_split_into_words):
130
+ raise Exception(
131
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
132
+ " pretokenized inputs."
133
+ )
134
+
135
+ return super()._batch_encode_plus(*args, **kwargs)
136
+
137
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
138
+ is_split_into_words = kwargs.get("is_split_into_words", False)
139
+
140
+ if not (self.add_prefix_space or not is_split_into_words):
141
+ raise Exception(
142
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
143
+ " pretokenized inputs."
144
+ )
145
+
146
+ return super()._encode_plus(*args, **kwargs)
147
+
148
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
149
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
150
+ return tuple(files)
151
+
152
+ @property
153
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.default_chat_template
154
+ def default_chat_template(self):
155
+ """
156
+ A simple chat template that ignores role information and just concatenates messages with EOS tokens.
157
+ """
158
+ logger.warning_once(
159
+ "\nNo chat template is defined for this tokenizer - using the default template "
160
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
161
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
162
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
163
+ )
164
+ return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/__init__.py ADDED
@@ -0,0 +1,94 @@
1
+ # Copyright 2022 Facebook and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig"],
21
+ "tokenization_esm": ["EsmTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_esm"] = [
31
+ "ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "EsmForMaskedLM",
33
+ "EsmForSequenceClassification",
34
+ "EsmForTokenClassification",
35
+ "EsmModel",
36
+ "EsmPreTrainedModel",
37
+ ]
38
+ _import_structure["modeling_esmfold"] = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
39
+
40
+ try:
41
+ if not is_tf_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_tf_esm"] = [
47
+ "TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "TFEsmForMaskedLM",
49
+ "TFEsmForSequenceClassification",
50
+ "TFEsmForTokenClassification",
51
+ "TFEsmModel",
52
+ "TFEsmPreTrainedModel",
53
+ ]
54
+
55
+ if TYPE_CHECKING:
56
+ from .configuration_esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig
57
+ from .tokenization_esm import EsmTokenizer
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_esm import (
66
+ ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ EsmForMaskedLM,
68
+ EsmForSequenceClassification,
69
+ EsmForTokenClassification,
70
+ EsmModel,
71
+ EsmPreTrainedModel,
72
+ )
73
+ from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding
74
+
75
+ try:
76
+ if not is_tf_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .modeling_tf_esm import (
82
+ TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
83
+ TFEsmForMaskedLM,
84
+ TFEsmForSequenceClassification,
85
+ TFEsmForTokenClassification,
86
+ TFEsmModel,
87
+ TFEsmPreTrainedModel,
88
+ )
89
+
90
+
91
+ else:
92
+ import sys
93
+
94
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
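The `_LazyModule` registration above defers the heavy framework imports until a symbol is actually used; a hedged sketch of what that means in practice:

```python
# Importing the package is cheap; submodules load on first attribute access.
import transformers.models.esm as esm_pkg

config_cls = esm_pkg.EsmConfig        # triggers import of configuration_esm
tokenizer_cls = esm_pkg.EsmTokenizer  # triggers import of tokenization_esm
print(config_cls.model_type)          # "esm"
```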
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py ADDED
@@ -0,0 +1,361 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ESM model configuration"""
16
+
17
+ from dataclasses import asdict, dataclass
18
+ from typing import Optional
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ # TODO Update this
27
+
28
+ from ..deprecated._archive_maps import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class EsmConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of an [`ESMModel`]. It is used to instantiate an ESM model
34
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the ESM
36
+ [facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*):
44
+ Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
45
+ `inputs_ids` passed when calling [`ESMModel`].
46
+ mask_token_id (`int`, *optional*):
47
+ The index of the mask token in the vocabulary. This must be included in the config because of the
48
+ "mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
49
+ pad_token_id (`int`, *optional*):
50
+ The index of the padding token in the vocabulary. This must be included in the config because certain parts
51
+ of the ESM code use this instead of the attention mask.
52
+ hidden_size (`int`, *optional*, defaults to 768):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ num_hidden_layers (`int`, *optional*, defaults to 12):
55
+ Number of hidden layers in the Transformer encoder.
56
+ num_attention_heads (`int`, *optional*, defaults to 12):
57
+ Number of attention heads for each attention layer in the Transformer encoder.
58
+ intermediate_size (`int`, *optional*, defaults to 3072):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 1026):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
72
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`, `"rotary"`.
73
+ For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
74
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
75
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
76
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
77
+ is_decoder (`bool`, *optional*, defaults to `False`):
78
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
79
+ use_cache (`bool`, *optional*, defaults to `True`):
80
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
81
+ relevant if `config.is_decoder=True`.
82
+ emb_layer_norm_before (`bool`, *optional*):
83
+ Whether to apply layer normalization after embeddings but before the main stem of the network.
84
+ token_dropout (`bool`, defaults to `False`):
85
+ When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
86
+
87
+ Examples:
88
+
89
+ ```python
90
+ >>> from transformers import EsmModel, EsmConfig
91
+
92
+ >>> # Initializing an ESM facebook/esm-1b style configuration
+ >>> configuration = EsmConfig()
93
+
94
+ >>> # Initializing a model from the configuration
+ >>> model = ESMModel(configuration)
95
+
96
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "esm"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=None,
104
+ mask_token_id=None,
105
+ pad_token_id=None,
106
+ hidden_size=768,
107
+ num_hidden_layers=12,
108
+ num_attention_heads=12,
109
+ intermediate_size=3072,
110
+ hidden_dropout_prob=0.1,
111
+ attention_probs_dropout_prob=0.1,
112
+ max_position_embeddings=1026,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ position_embedding_type="absolute",
116
+ use_cache=True,
117
+ emb_layer_norm_before=None,
118
+ token_dropout=False,
119
+ is_folding_model=False,
120
+ esmfold_config=None,
121
+ vocab_list=None,
122
+ **kwargs,
123
+ ):
124
+ super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
125
+
126
+ self.vocab_size = vocab_size
127
+ self.hidden_size = hidden_size
128
+ self.num_hidden_layers = num_hidden_layers
129
+ self.num_attention_heads = num_attention_heads
130
+ self.intermediate_size = intermediate_size
131
+ self.hidden_dropout_prob = hidden_dropout_prob
132
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
133
+ self.max_position_embeddings = max_position_embeddings
134
+ self.initializer_range = initializer_range
135
+ self.layer_norm_eps = layer_norm_eps
136
+ self.position_embedding_type = position_embedding_type
137
+ self.use_cache = use_cache
138
+ self.emb_layer_norm_before = emb_layer_norm_before
139
+ self.token_dropout = token_dropout
140
+ self.is_folding_model = is_folding_model
141
+ if is_folding_model:
142
+ if esmfold_config is None:
143
+ logger.info("No esmfold_config supplied for folding model, using default values.")
144
+ esmfold_config = EsmFoldConfig()
145
+ elif isinstance(esmfold_config, dict):
146
+ esmfold_config = EsmFoldConfig(**esmfold_config)
147
+ self.esmfold_config = esmfold_config
148
+ if vocab_list is None:
149
+ logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
150
+ self.vocab_list = get_default_vocab_list()
151
+ else:
152
+ self.vocab_list = vocab_list
153
+ else:
154
+ self.esmfold_config = None
155
+ self.vocab_list = None
156
+ if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
157
+ raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
158
+
159
+ def to_dict(self):
160
+ """
161
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
162
+
163
+ Returns:
164
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
165
+ """
166
+ output = super().to_dict()
167
+ if isinstance(self.esmfold_config, EsmFoldConfig):
168
+ output["esmfold_config"] = self.esmfold_config.to_dict()
169
+ return output
170
+
171
+
172
+ @dataclass
173
+ class EsmFoldConfig:
174
+ esm_type: str = None
175
+ fp16_esm: bool = True
176
+ use_esm_attn_map: bool = False
177
+ esm_ablate_pairwise: bool = False
178
+ esm_ablate_sequence: bool = False
179
+ esm_input_dropout: float = 0
180
+
181
+ embed_aa: bool = True
182
+ bypass_lm: bool = False
183
+
184
+ lddt_head_hid_dim: int = 128
185
+ trunk: "TrunkConfig" = None
186
+
187
+ def __post_init__(self):
188
+ if self.trunk is None:
189
+ self.trunk = TrunkConfig()
190
+ elif isinstance(self.trunk, dict):
191
+ self.trunk = TrunkConfig(**self.trunk)
192
+
193
+ def to_dict(self):
194
+ """
195
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
196
+
197
+ Returns:
198
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
199
+ """
200
+ output = asdict(self)
201
+ output["trunk"] = self.trunk.to_dict()
202
+ return output
203
+
204
+
205
+ @dataclass
206
+ class TrunkConfig:
207
+ num_blocks: int = 48
208
+ sequence_state_dim: int = 1024
209
+ pairwise_state_dim: int = 128
210
+ sequence_head_width: int = 32
211
+ pairwise_head_width: int = 32
212
+ position_bins: int = 32
213
+ dropout: float = 0
214
+ layer_drop: float = 0
215
+ cpu_grad_checkpoint: bool = False
216
+ max_recycles: int = 4
217
+ chunk_size: Optional[int] = 128
218
+ structure_module: "StructureModuleConfig" = None
219
+
220
+ def __post_init__(self):
221
+ if self.structure_module is None:
222
+ self.structure_module = StructureModuleConfig()
223
+ elif isinstance(self.structure_module, dict):
224
+ self.structure_module = StructureModuleConfig(**self.structure_module)
225
+
226
+ if self.max_recycles <= 0:
227
+ raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
228
+ if self.sequence_state_dim % self.sequence_head_width != 0:
229
+ raise ValueError(
230
+ "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
231
+ f" {self.sequence_state_dim} and {self.sequence_head_width}."
232
+ )
233
+ if self.pairwise_state_dim % self.pairwise_head_width != 0:
234
+ raise ValueError(
235
+ "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
236
+ f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
237
+ )
238
+
239
+ sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
240
+ pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
241
+
242
+ if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
243
+ raise ValueError(
244
+ "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
245
+ f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
246
+ )
247
+ if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
248
+ raise ValueError(
249
+ "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
250
+ f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
251
+ )
252
+ if self.pairwise_state_dim % 2 != 0:
253
+ raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
254
+
255
+ if self.dropout >= 0.4:
256
+ raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
257
+
258
+ def to_dict(self):
259
+ """
260
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
261
+
262
+ Returns:
263
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
264
+ """
265
+ output = asdict(self)
266
+ output["structure_module"] = self.structure_module.to_dict()
267
+ return output
268
+
269
+
270
+ @dataclass
271
+ class StructureModuleConfig:
272
+ """
273
+ Args:
274
+ sequence_dim:
275
+ Single representation channel dimension
276
+ pairwise_dim:
277
+ Pair representation channel dimension
278
+ ipa_dim:
279
+ IPA hidden channel dimension
280
+ resnet_dim:
281
+ Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
282
+ num_heads_ipa:
283
+ Number of IPA heads
284
+ num_qk_points:
285
+ Number of query/key points to generate during IPA
286
+ num_v_points:
287
+ Number of value points to generate during IPA
288
+ dropout_rate:
289
+ Dropout rate used throughout the layer
290
+ num_blocks:
291
+ Number of structure module blocks
292
+ num_transition_layers:
293
+ Number of layers in the single representation transition (Alg. 23 lines 8-9)
294
+ num_resnet_blocks:
295
+ Number of blocks in the angle resnet
296
+ num_angles:
297
+ Number of angles to generate in the angle resnet
298
+ trans_scale_factor:
299
+ Scale of single representation transition hidden dimension
300
+ epsilon:
301
+ Small number used in angle resnet normalization
302
+ inf:
303
+ Large number used for attention masking
304
+ """
305
+
306
+ sequence_dim: int = 384
307
+ pairwise_dim: int = 128
308
+ ipa_dim: int = 16
309
+ resnet_dim: int = 128
310
+ num_heads_ipa: int = 12
311
+ num_qk_points: int = 4
312
+ num_v_points: int = 8
313
+ dropout_rate: float = 0.1
314
+ num_blocks: int = 8
315
+ num_transition_layers: int = 1
316
+ num_resnet_blocks: int = 2
317
+ num_angles: int = 7
318
+ trans_scale_factor: int = 10
319
+ epsilon: float = 1e-8
320
+ inf: float = 1e5
321
+
322
+ def to_dict(self):
323
+ return asdict(self)
324
+
325
+
326
+ def get_default_vocab_list():
327
+ return (
328
+ "<cls>",
329
+ "<pad>",
330
+ "<eos>",
331
+ "<unk>",
332
+ "L",
333
+ "A",
334
+ "G",
335
+ "V",
336
+ "S",
337
+ "E",
338
+ "R",
339
+ "T",
340
+ "I",
341
+ "D",
342
+ "P",
343
+ "K",
344
+ "Q",
345
+ "N",
346
+ "F",
347
+ "Y",
348
+ "M",
349
+ "H",
350
+ "W",
351
+ "C",
352
+ "X",
353
+ "B",
354
+ "U",
355
+ "Z",
356
+ "O",
357
+ ".",
358
+ "-",
359
+ "<null_1>",
360
+ "<mask>",
361
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py ADDED
@@ -0,0 +1,400 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ESM checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import pathlib
20
+ from pathlib import Path
21
+ from tempfile import TemporaryDirectory
22
+
23
+ import esm as esm_module
24
+ import torch
25
+ from esm.esmfold.v1.misc import batch_encode_sequences as esmfold_encode_sequences
26
+ from esm.esmfold.v1.pretrained import esmfold_v1
27
+
28
+ from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig
29
+ from transformers.models.esm.modeling_esm import (
30
+ EsmForMaskedLM,
31
+ EsmForSequenceClassification,
32
+ EsmIntermediate,
33
+ EsmLayer,
34
+ EsmOutput,
35
+ EsmSelfAttention,
36
+ EsmSelfOutput,
37
+ )
38
+ from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
39
+ from transformers.models.esm.tokenization_esm import EsmTokenizer
40
+ from transformers.utils import logging
41
+
42
+
43
+ logging.set_verbosity_info()
44
+ logger = logging.get_logger(__name__)
45
+
46
+ SAMPLE_DATA = [
47
+ (
48
+ "protein1",
49
+ "MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEPWQFSMLAAYMFLLIVLGFPINFLTLYVTVQHKKLRTPLNYILLNLAVADLFMVLGGFTSTLYTSLHGYFVFGPTGCNLEGFFATLGGEIALWSLVVLAIERYVVVCKPMSNFRFGENHAIMGVAFTWVMALACAAPPLAGWSRYIPEGLQCSCGIDYYTLKPEVNNESFVIYMFVVHFTIPMIIIFFCYGQLVFTVKEAAAQQQESATTQKAEKEVTRMVIIMVIAFLICWVPYASVAFYIFTHQGSNFGPIFMTIPAFFAKSAAIYNPVIYIMMNKQFRNCMLTTICCGKNPLGDDEASATVSKTETSQVAPA",
50
+ ),
51
+ ("protein2", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA"),
52
+ ("protein3", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLAGG"),
53
+ ("protein4", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLA"),
54
+ ]
55
+
56
+ MODEL_MAPPING = {
57
+ "esm1b_t33_650M_UR50S": esm_module.pretrained.esm1b_t33_650M_UR50S,
58
+ "esm1v_t33_650M_UR90S_1": esm_module.pretrained.esm1v_t33_650M_UR90S_1,
59
+ "esm1v_t33_650M_UR90S_2": esm_module.pretrained.esm1v_t33_650M_UR90S_2,
60
+ "esm1v_t33_650M_UR90S_3": esm_module.pretrained.esm1v_t33_650M_UR90S_3,
61
+ "esm1v_t33_650M_UR90S_4": esm_module.pretrained.esm1v_t33_650M_UR90S_4,
62
+ "esm1v_t33_650M_UR90S_5": esm_module.pretrained.esm1v_t33_650M_UR90S_5,
63
+ "esm2_t48_15B_UR50D": esm_module.pretrained.esm2_t48_15B_UR50D,
64
+ "esm2_t36_3B_UR50D": esm_module.pretrained.esm2_t36_3B_UR50D,
65
+ "esm2_t33_650M_UR50D": esm_module.pretrained.esm2_t33_650M_UR50D,
66
+ "esm2_t30_150M_UR50D": esm_module.pretrained.esm2_t30_150M_UR50D,
67
+ "esm2_t12_35M_UR50D": esm_module.pretrained.esm2_t12_35M_UR50D,
68
+ "esm2_t6_8M_UR50D": esm_module.pretrained.esm2_t6_8M_UR50D,
69
+ "esmfold_v1": esmfold_v1,
70
+ }
71
+
72
+ restypes = list("ARNDCQEGHILKMFPSTWYV")
73
+
74
+ restypes_with_x = restypes + ["X"]
75
+ restypes_with_extras = restypes_with_x + ["<pad>", "<mask>", "<cls>", "<sep>", "<eos>"]
76
+
77
+
78
+ def get_esmfold_tokenizer():
79
+ with TemporaryDirectory() as tempdir:
80
+ vocab = "\n".join(restypes_with_extras)
81
+ vocab_file = Path(tempdir) / "vocab.txt"
82
+ vocab_file.write_text(vocab)
83
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
84
+ hf_tokenizer.pad_token_id = 0 # Overlaps with 'A' but that seems to be what they want
85
+ return hf_tokenizer
86
+
87
+
88
+ def transfer_and_check_weights(original_module, our_module):
89
+ status = our_module.load_state_dict(original_module.state_dict())
90
+ if status.missing_keys:
91
+ raise ValueError(f"Missing keys: {status.missing_keys}")
92
+ if status.unexpected_keys:
93
+ raise ValueError(f"Unexpected keys: {status.unexpected_keys}")
94
+
95
+
96
+ def convert_esm_checkpoint_to_pytorch(
97
+ model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str
98
+ ):
99
+ """
100
+ Copy/paste/tweak esm's weights to our BERT structure.
101
+ """
102
+ if model.startswith("esmfold"):
103
+ esm = MODEL_MAPPING[model]()
104
+ else:
105
+ esm, alphabet = MODEL_MAPPING[model]()
106
+ esm.eval() # disable dropout
107
+
108
+ if model.startswith("esmfold"):
109
+ embed_dim = esm.esm.embed_dim
110
+ num_layers = esm.esm.num_layers
111
+ num_attention_heads = esm.esm.attention_heads
112
+ intermediate_size = 4 * embed_dim
113
+ token_dropout = esm.esm.token_dropout
114
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
115
+ position_embedding_type = "rotary"
116
+ is_folding_model = True
117
+ esmfold_config = EsmFoldConfig()
118
+ for key, val in esm.cfg.items():
119
+ if hasattr(esmfold_config, key) and key != "trunk":
120
+ setattr(esmfold_config, key, val)
121
+ for key, val in esm.cfg.trunk.items():
122
+ if hasattr(esmfold_config.trunk, key) and key != "structure_module":
123
+ setattr(esmfold_config.trunk, key, val)
124
+ for key, val in esm.cfg.trunk.structure_module.items():
125
+ if hasattr(esmfold_config.trunk.structure_module, key):
126
+ setattr(esmfold_config.trunk.structure_module, key, val)
127
+ elif hasattr(esm, "args"):
128
+ # Indicates an ESM-1b or ESM-1v model
129
+ embed_dim = esm.args.embed_dim
130
+ num_layers = esm.args.layers
131
+ num_attention_heads = esm.args.attention_heads
132
+ intermediate_size = esm.args.ffn_embed_dim
133
+ token_dropout = esm.args.token_dropout
134
+ emb_layer_norm_before = True if esm.emb_layer_norm_before else False
135
+ position_embedding_type = "absolute"
136
+ is_folding_model = False
137
+ esmfold_config = None
138
+ else:
139
+ # Indicates an ESM-2 model
140
+ embed_dim = esm.embed_dim
141
+ num_layers = esm.num_layers
142
+ num_attention_heads = esm.attention_heads
143
+ intermediate_size = 4 * embed_dim # This is hardcoded in ESM-2
144
+ token_dropout = esm.token_dropout
145
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
146
+ position_embedding_type = "rotary"
147
+ is_folding_model = False
148
+ esmfold_config = None
149
+
150
+ if is_folding_model:
151
+ alphabet = esm.esm.alphabet
152
+ vocab_list = tuple(alphabet.all_toks)
153
+ mask_token_id = alphabet.mask_idx
154
+ pad_token_id = alphabet.padding_idx
155
+
156
+ if is_folding_model:
157
+ original_esm_model = esm.esm
158
+ else:
159
+ original_esm_model = esm
160
+
161
+ config = EsmConfig(
162
+ vocab_size=original_esm_model.embed_tokens.num_embeddings,
163
+ mask_token_id=mask_token_id,
164
+ hidden_size=embed_dim,
165
+ num_hidden_layers=num_layers,
166
+ num_attention_heads=num_attention_heads,
167
+ intermediate_size=intermediate_size,
168
+ max_position_embeddings=1026,
169
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
170
+ attention_probs_dropout_prob=0.0,
171
+ hidden_dropout_prob=0.0,
172
+ pad_token_id=pad_token_id,
173
+ emb_layer_norm_before=emb_layer_norm_before,
174
+ token_dropout=token_dropout,
175
+ position_embedding_type=position_embedding_type,
176
+ is_folding_model=is_folding_model,
177
+ esmfold_config=esmfold_config,
178
+ vocab_list=vocab_list,
179
+ )
180
+ if classification_head:
181
+ config.num_labels = esm.classification_heads["mnli"].out_proj.weight.shape[0]
182
+ print("Our ESM config:", config)
183
+
184
+ if model.startswith("esmfold"):
185
+ model_class = EsmForProteinFolding
186
+ elif classification_head:
187
+ model_class = EsmForSequenceClassification
188
+ else:
189
+ model_class = EsmForMaskedLM
190
+ model = model_class(config)
191
+ model.eval()
192
+
193
+ # Now let's copy all the weights.
194
+ # Embeddings
195
+ model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight
196
+ if position_embedding_type == "absolute":
197
+ model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight
198
+
199
+ if config.emb_layer_norm_before:
200
+ model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight
201
+ model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias
202
+
203
+ model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight
204
+ model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias
205
+
206
+ for i in range(config.num_hidden_layers):
207
+ # Encoder: start of layer
208
+ layer: EsmLayer = model.esm.encoder.layer[i]
209
+ # esm_layer: TransformerSentenceEncoderLayer = original_esm_model.layers[i]
210
+ esm_layer = original_esm_model.layers[i]
211
+
212
+ # self attention
213
+ self_attn: EsmSelfAttention = layer.attention.self
214
+ assert (
215
+ esm_layer.self_attn.k_proj.weight.data.shape
216
+ == esm_layer.self_attn.q_proj.weight.data.shape
217
+ == esm_layer.self_attn.v_proj.weight.data.shape
218
+ == torch.Size((config.hidden_size, config.hidden_size))
219
+ )
220
+
221
+ self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight
222
+ self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias
223
+ self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight
224
+ self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias
225
+ self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight
226
+ self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias
227
+
228
+ if getattr(esm_layer.self_attn, "rot_emb", None) is not None:
229
+ # Matt: Although inv_freq is not a trainable weight, it is computed at model init and cached.
230
+ # During the training of ESM-2 the model was converted to float16 precision, which also converts
231
+ # the inv_freq tensor, and the loss of precision remains even if the model is loaded later as float32.
232
+ # If we recompute inv_freq without this loss of precision then we will get subtly different rotary
233
+ # embeddings, which are enough to cause significant discrepancies in model outputs. To avoid this,
234
+ # we make sure the new model copies the data from the old inv_freq.
235
+ self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq
236
+
237
+ # LayerNorm changes for pre-activation
238
+ layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight
239
+ layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias
240
+ layer.LayerNorm.weight = esm_layer.final_layer_norm.weight
241
+ layer.LayerNorm.bias = esm_layer.final_layer_norm.bias
242
+
243
+ # self-attention output
244
+ self_output: EsmSelfOutput = layer.attention.output
245
+ assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape
246
+ self_output.dense.weight = esm_layer.self_attn.out_proj.weight
247
+ self_output.dense.bias = esm_layer.self_attn.out_proj.bias
248
+
249
+ # intermediate
250
+ intermediate: EsmIntermediate = layer.intermediate
251
+ assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape
252
+ intermediate.dense.weight = esm_layer.fc1.weight
253
+ intermediate.dense.bias = esm_layer.fc1.bias
254
+
255
+ # output
256
+ bert_output: EsmOutput = layer.output
257
+ assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape
258
+ bert_output.dense.weight = esm_layer.fc2.weight
259
+ bert_output.dense.bias = esm_layer.fc2.bias
260
+ # end of layer
261
+
262
+ if is_folding_model:
263
+ model.esm_s_combine.data = esm.esm_s_combine.data
264
+ model.af2_to_esm.data = esm.af2_to_esm.data
265
+ transfer_and_check_weights(esm.embedding, model.embedding)
266
+ transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp)
267
+ transfer_and_check_weights(esm.trunk, model.trunk)
268
+ transfer_and_check_weights(esm.distogram_head, model.distogram_head)
269
+ transfer_and_check_weights(esm.ptm_head, model.ptm_head)
270
+ transfer_and_check_weights(esm.lm_head, model.lm_head)
271
+ transfer_and_check_weights(esm.lddt_head, model.lddt_head)
272
+
273
+ elif classification_head:
274
+ model.classifier.dense.weight = esm.classification_heads["mnli"].dense.weight
275
+ model.classifier.dense.bias = esm.classification_heads["mnli"].dense.bias
276
+ model.classifier.out_proj.weight = esm.classification_heads["mnli"].out_proj.weight
277
+ model.classifier.out_proj.bias = esm.classification_heads["mnli"].out_proj.bias
278
+ else:
279
+ # LM Head
280
+ model.lm_head.dense.weight = esm.lm_head.dense.weight
281
+ model.lm_head.dense.bias = esm.lm_head.dense.bias
282
+ model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight
283
+ model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias
284
+ model.lm_head.decoder.weight = esm.lm_head.weight
285
+ model.lm_head.bias = esm.lm_head.bias
286
+
287
+ # Contact prediction head
288
+ transfer_and_check_weights(esm.contact_head, model.esm.contact_head)
289
+
290
+ # Prepare data (first 2 sequences from ESMStructuralSplitDataset superfamily / 4)
291
+ if is_folding_model:
292
+ # Folding models aren't trained on masked inputs and don't like mask tokens.
293
+ sample_data = SAMPLE_DATA[:2]
294
+ else:
295
+ sample_data = SAMPLE_DATA
296
+
297
+ if is_folding_model:
298
+ hf_tokenizer = get_esmfold_tokenizer()
299
+ hf_tokens = hf_tokenizer(
300
+ [row[1] for row in sample_data], return_tensors="pt", padding=True, add_special_tokens=False
301
+ )
302
+ esmfold_aas, esmfold_mask, _, _, _ = esmfold_encode_sequences([row[1] for row in sample_data])
303
+ success = torch.all(hf_tokens["input_ids"] == esmfold_aas) and torch.all(
304
+ hf_tokens["attention_mask"] == esmfold_mask
305
+ )
306
+ else:
307
+ # Let's check that we get the same results.
308
+ batch_converter = alphabet.get_batch_converter()
309
+ batch_labels, batch_strs, batch_tokens = batch_converter(sample_data)
310
+ # Prepare tokenizer and make sure it matches
311
+ with TemporaryDirectory() as tempdir:
312
+ vocab = "\n".join(alphabet.all_toks)
313
+ vocab_file = Path(tempdir) / "vocab.txt"
314
+ vocab_file.write_text(vocab)
315
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
316
+
317
+ hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors="pt", padding=True)
318
+ success = torch.all(hf_tokens["input_ids"] == batch_tokens)
319
+
320
+ print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩")
321
+ if not success:
322
+ raise Exception("Tokenization does not match!")
323
+
324
+ with torch.no_grad():
325
+ if is_folding_model:
326
+ # Let's test the model in parts
327
+ # ESMFold always converts the ESM stem to float16, which requires float16 ops
328
+ # that don't exist on CPU. Therefore, to test it we need to run it on GPU. However,
329
+ # ESMFold is what we in the community call a "big boy" and so we desperately avoid putting both the
330
+ # original and the converted model on the GPU at the same time.
331
+ their_output = esm.cuda().infer([row[1] for row in sample_data])
332
+ our_output = model.cuda()(
333
+ input_ids=hf_tokens["input_ids"].cuda(), attention_mask=hf_tokens["attention_mask"].cuda()
334
+ )
335
+ else:
336
+ our_output = model(**hf_tokens, output_hidden_states=True)
337
+ our_output = our_output["logits"]
338
+ if classification_head:
339
+ their_output = esm.model.classification_heads["mnli"](esm.extract_features(batch_tokens))
340
+ else:
341
+ their_output = esm(hf_tokens["input_ids"], repr_layers=list(range(999)))
342
+ their_output = their_output["logits"]
343
+
344
+ if is_folding_model:
345
+ max_absolute_diff = torch.max(torch.abs(our_output["positions"] - their_output["positions"])).item()
346
+ success = torch.allclose(our_output["positions"], their_output["positions"], atol=1e-5)
347
+ else:
348
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
349
+ success = torch.allclose(our_output, their_output, atol=1e-5)
350
+
351
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
352
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
353
+
354
+ if not success:
355
+ raise Exception("Something went wRoNg")
356
+
357
+ if not is_folding_model:
358
+ # Let's check contact prediction too
359
+ our_output = model.predict_contacts(hf_tokens["input_ids"], hf_tokens["attention_mask"])
360
+ their_output = esm.predict_contacts(hf_tokens["input_ids"])
361
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
362
+ success = torch.allclose(our_output, their_output, atol=1e-5)
363
+
364
+ print("Contact prediction testing:")
365
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
366
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
367
+
368
+ if not success:
369
+ raise Exception("Something went wRoNg")
370
+
371
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
372
+ print(f"Saving model to {pytorch_dump_folder_path}")
373
+ model.save_pretrained(pytorch_dump_folder_path)
374
+
375
+ del esm # Free up some memory before continuing
376
+
377
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
378
+ hf_tokenizer.save_pretrained(pytorch_dump_folder_path)
379
+
380
+ if push_to_repo:
381
+ model.push_to_hub(repo_id=push_to_repo, token=auth_token)
382
+ hf_tokenizer.push_to_hub(repo_id=push_to_repo, token=auth_token)
383
+
384
+
385
+ if __name__ == "__main__":
386
+ parser = argparse.ArgumentParser()
387
+ # Required parameters
388
+ parser.add_argument(
389
+ "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model."
390
+ )
391
+ parser.add_argument(
392
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
393
+ )
394
+ parser.add_argument("--model", default=None, type=str, required=True, help="Name of model to convert.")
395
+ parser.add_argument("--push_to_repo", type=str, help="Repo to upload to (including username!).")
396
+ parser.add_argument("--auth_token", type=str, help="HuggingFace auth token.")
397
+ args = parser.parse_args()
398
+ convert_esm_checkpoint_to_pytorch(
399
+ args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token
400
+ )
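For reference, an invocation sketch built from the argparse definitions above, followed by loading the converted checkpoint. The output folder name is an illustrative assumption.

```python
# Conversion is run as a script, e.g.:
#   python convert_esm.py --model esm2_t6_8M_UR50D --pytorch_dump_folder_path ./esm2-hf
#   (add --push_to_repo <user>/<repo> --auth_token <token> to upload the result)
# Afterwards the dump folder loads like any Transformers checkpoint:
from transformers import AutoTokenizer, EsmForMaskedLM

model = EsmForMaskedLM.from_pretrained("./esm2-hf")
tokenizer = AutoTokenizer.from_pretrained("./esm2-hf")
```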
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py ADDED
@@ -0,0 +1,1265 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ESM model."""
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ MaskedLMOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import logging
35
+ from .configuration_esm import EsmConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
41
+ _CONFIG_FOR_DOC = "EsmConfig"
42
+
43
+
44
+ from ..deprecated._archive_maps import ESM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
45
+
46
+
47
+ def rotate_half(x):
48
+ x1, x2 = x.chunk(2, dim=-1)
49
+ return torch.cat((-x2, x1), dim=-1)
50
+
51
+
52
+ def apply_rotary_pos_emb(x, cos, sin):
53
+ cos = cos[:, :, : x.shape[-2], :]
54
+ sin = sin[:, :, : x.shape[-2], :]
55
+
56
+ return (x * cos) + (rotate_half(x) * sin)
57
+
58
+
59
+ def gelu(x):
60
+ """
61
+ This is the gelu implementation from the original ESM repo. Using F.gelu yields subtly wrong results.
62
+ """
63
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
64
+
65
+
66
+ def symmetrize(x):
67
+ "Make layer symmetric in final two dimensions, used for contact prediction."
68
+ return x + x.transpose(-1, -2)
69
+
70
+
71
+ def average_product_correct(x):
72
+ "Perform average product correct, used for contact prediction."
73
+ a1 = x.sum(-1, keepdims=True)
74
+ a2 = x.sum(-2, keepdims=True)
75
+ a12 = x.sum((-1, -2), keepdims=True)
76
+
77
+ avg = a1 * a2
78
+ avg.div_(a12) # in-place to reduce memory
79
+ normalized = x - avg
80
+ return normalized
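A hedged numeric illustration of the two helpers above as they are used for contact prediction; the shapes are toy values.

```python
# Symmetrize an attention map and apply average product correction (APC);
# this mirrors symmetrize() and average_product_correct() above on toy data.
import torch

attn = torch.rand(1, 12, 10, 10)           # (batch, channels, tokens, tokens)
sym = attn + attn.transpose(-1, -2)        # symmetrize
a1 = sym.sum(-1, keepdims=True)
a2 = sym.sum(-2, keepdims=True)
a12 = sym.sum((-1, -2), keepdims=True)
apc = sym - (a1 * a2) / a12
assert torch.allclose(apc, apc.transpose(-1, -2), atol=1e-5)  # result stays symmetric
```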
81
+
82
+
83
+ class RotaryEmbedding(torch.nn.Module):
84
+ """
85
+ Rotary position embeddings based on those in
86
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
87
+ matrices which depend on their relative positions.
88
+ """
89
+
90
+ def __init__(self, dim: int):
91
+ super().__init__()
92
+ # Generate and save the inverse frequency buffer (non trainable)
93
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
94
95
+ self.register_buffer("inv_freq", inv_freq)
96
+
97
+ self._seq_len_cached = None
98
+ self._cos_cached = None
99
+ self._sin_cached = None
100
+
101
+ def _update_cos_sin_tables(self, x, seq_dimension=2):
102
+ seq_len = x.shape[seq_dimension]
103
+
104
+ # Reset the tables if the sequence length has changed,
105
+ # or if we're on a new device (possibly due to tracing for instance)
106
+ if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
107
+ self._seq_len_cached = seq_len
108
+ t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq)
109
+ freqs = torch.outer(t, self.inv_freq)
110
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
111
+
112
+ self._cos_cached = emb.cos()[None, None, :, :]
113
+ self._sin_cached = emb.sin()[None, None, :, :]
114
+
115
+ return self._cos_cached, self._sin_cached
116
+
117
+ def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
118
+ self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)
119
+
120
+ return (
121
+ apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
122
+ apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
123
+ )
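A small usage sketch for the module above; it assumes the `RotaryEmbedding` class defined here is in scope. Shapes are toy values in the (batch, heads, seq_len, head_dim) layout the class expects, and `head_dim` must be even for `rotate_half`.

```python
# Rotate toy query/key tensors; shapes are preserved and only the last dim is mixed.
import torch

rotary = RotaryEmbedding(dim=16)   # assumes the class defined above is in scope
q = torch.randn(2, 4, 10, 16)      # (batch, heads, seq_len, head_dim)
k = torch.randn(2, 4, 10, 16)
q_rot, k_rot = rotary(q, k)
print(q_rot.shape, k_rot.shape)    # torch.Size([2, 4, 10, 16]) twice
```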
124
+
125
+
126
+ class EsmContactPredictionHead(nn.Module):
127
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
128
+
129
+ def __init__(
130
+ self,
131
+ in_features: int,
132
+ bias=True,
133
+ eos_idx: int = 2,
134
+ ):
135
+ super().__init__()
136
+ self.in_features = in_features
137
+ self.eos_idx = eos_idx
138
+ self.regression = nn.Linear(in_features, 1, bias)
139
+ self.activation = nn.Sigmoid()
140
+
141
+ def forward(self, tokens, attentions):
142
+ # remove eos token attentions
143
+ eos_mask = tokens.ne(self.eos_idx).to(attentions)
144
+ eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
145
+ attentions = attentions * eos_mask[:, None, None, :, :]
146
+ attentions = attentions[..., :-1, :-1]
147
+ # remove cls token attentions
148
+ attentions = attentions[..., 1:, 1:]
149
+ batch_size, layers, heads, seqlen, _ = attentions.size()
150
+ attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
151
+
152
+ # features: batch x channels x tokens x tokens (symmetric)
153
+ attentions = attentions.to(
154
+ self.regression.weight.device
155
+ ) # attentions always float32, may need to convert to float16
156
+ attentions = average_product_correct(symmetrize(attentions))
157
+ attentions = attentions.permute(0, 2, 3, 1)
158
+ return self.activation(self.regression(attentions).squeeze(3))
159
+
160
+
161
+ class EsmEmbeddings(nn.Module):
162
+ """
163
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
164
+ """
165
+
166
+ def __init__(self, config):
167
+ super().__init__()
168
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
169
+
170
+ if config.emb_layer_norm_before:
171
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
172
+ else:
173
+ self.layer_norm = None
174
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
175
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
176
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
177
+ self.register_buffer(
178
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
179
+ )
180
+
181
+ self.padding_idx = config.pad_token_id
182
+ self.position_embeddings = nn.Embedding(
183
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
184
+ )
185
+ self.token_dropout = config.token_dropout
186
+ self.mask_token_id = config.mask_token_id
187
+
188
+ def forward(
189
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
190
+ ):
191
+ if position_ids is None:
192
+ if input_ids is not None:
193
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
194
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
195
+ else:
196
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
197
+
198
+ if inputs_embeds is None:
199
+ inputs_embeds = self.word_embeddings(input_ids)
200
+
201
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
202
+ # embedding_scale factor here.
203
+ embeddings = inputs_embeds
204
+
205
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
206
+ # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
207
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
208
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
209
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
210
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
211
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
212
+ if self.token_dropout:
213
+ embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0)
214
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
215
+ src_lengths = attention_mask.sum(-1)
216
+ mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths
217
+ embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(
218
+ embeddings.dtype
219
+ )
220
+
221
+ if self.position_embedding_type == "absolute":
222
+ position_embeddings = self.position_embeddings(position_ids)
223
+ embeddings = embeddings + position_embeddings
224
+
225
+ if self.layer_norm is not None:
226
+ embeddings = self.layer_norm(embeddings)
227
+ if attention_mask is not None:
228
+ embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype)
229
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
230
+ # embeddings = self.dropout(embeddings)
231
+ return embeddings
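To make the mask-dropout compensation in the comment above concrete, here is a small numeric sketch (not part of the uploaded file); the 10% observed mask rate is invented for illustration:

mask_ratio_train = 0.15 * 0.8      # ratio hardcoded above
mask_ratio_observed = 0.10         # assumed fraction of <mask> tokens in one sample
scale = (1 - mask_ratio_train) / (1 - mask_ratio_observed)
print(scale)                       # ~0.978, applied to every embedding in that sample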
232
+
233
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
234
+ """
235
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
236
+
237
+ Args:
238
+ inputs_embeds: torch.Tensor
239
+
240
+ Returns: torch.Tensor
241
+ """
242
+ input_shape = inputs_embeds.size()[:-1]
243
+ sequence_length = input_shape[1]
244
+
245
+ position_ids = torch.arange(
246
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
247
+ )
248
+ return position_ids.unsqueeze(0).expand(input_shape)
249
+
250
+
251
+ class EsmSelfAttention(nn.Module):
252
+ def __init__(self, config, position_embedding_type=None):
253
+ super().__init__()
254
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
255
+ raise ValueError(
256
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
257
+ f"heads ({config.num_attention_heads})"
258
+ )
259
+
260
+ self.num_attention_heads = config.num_attention_heads
261
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
262
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
263
+
264
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
265
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
266
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
267
+
268
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
269
+ self.position_embedding_type = position_embedding_type or getattr(
270
+ config, "position_embedding_type", "absolute"
271
+ )
272
+ self.rotary_embeddings = None
273
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
274
+ self.max_position_embeddings = config.max_position_embeddings
275
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
276
+ elif self.position_embedding_type == "rotary":
277
+ self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size)
278
+
279
+ self.is_decoder = config.is_decoder
280
+
281
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
282
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
283
+ x = x.view(new_x_shape)
284
+ return x.permute(0, 2, 1, 3)
285
+
286
+ def forward(
287
+ self,
288
+ hidden_states: torch.Tensor,
289
+ attention_mask: Optional[torch.FloatTensor] = None,
290
+ head_mask: Optional[torch.FloatTensor] = None,
291
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
292
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
293
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
294
+ output_attentions: Optional[bool] = False,
295
+ ) -> Tuple[torch.Tensor]:
296
+ mixed_query_layer = self.query(hidden_states)
297
+
298
+ # If this is instantiated as a cross-attention module, the keys
299
+ # and values come from an encoder; the attention mask needs to be
300
+ # such that the encoder's padding tokens are not attended to.
301
+ is_cross_attention = encoder_hidden_states is not None
302
+
303
+ if is_cross_attention and past_key_value is not None:
304
+ # reuse k,v, cross_attentions
305
+ key_layer = past_key_value[0]
306
+ value_layer = past_key_value[1]
307
+ attention_mask = encoder_attention_mask
308
+ elif is_cross_attention:
309
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
310
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
311
+ attention_mask = encoder_attention_mask
312
+ elif past_key_value is not None:
313
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
314
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
315
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
316
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
317
+ else:
318
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
319
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
320
+
321
+ query_layer = self.transpose_for_scores(mixed_query_layer)
322
+
323
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
324
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
325
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
326
+ # ESM code and fix rotary embeddings.
327
+ query_layer = query_layer * self.attention_head_size**-0.5
328
+
329
+ if self.is_decoder:
330
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
331
+ # Further calls to cross_attention layer can then reuse all cross-attention
332
+ # key/value_states (first "if" case)
333
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
334
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
335
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
336
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
337
+ past_key_value = (key_layer, value_layer)
338
+
339
+ if self.position_embedding_type == "rotary":
340
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
341
+
342
+ # Take the dot product between "query" and "key" to get the raw attention scores.
343
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
344
+
345
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
346
+ seq_length = hidden_states.size()[1]
347
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
348
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
349
+ distance = position_ids_l - position_ids_r
350
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
351
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
352
+
353
+ if self.position_embedding_type == "relative_key":
354
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
355
+ attention_scores = attention_scores + relative_position_scores
356
+ elif self.position_embedding_type == "relative_key_query":
357
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
358
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
359
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
360
+
361
+ if attention_mask is not None:
362
+ # Apply the attention mask (precomputed for all layers in EsmModel's forward() function)
363
+ attention_scores = attention_scores + attention_mask
364
+
365
+ # Normalize the attention scores to probabilities.
366
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
367
+
368
+ # This is actually dropping out entire tokens to attend to, which might
369
+ # seem a bit unusual, but is taken from the original Transformer paper.
370
+ attention_probs = self.dropout(attention_probs)
371
+
372
+ # Mask heads if we want to
373
+ if head_mask is not None:
374
+ attention_probs = attention_probs * head_mask
375
+
376
+ context_layer = torch.matmul(attention_probs.to(value_layer.dtype), value_layer)
377
+
378
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
379
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
380
+ context_layer = context_layer.view(new_context_layer_shape)
381
+
382
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
383
+
384
+ if self.is_decoder:
385
+ outputs = outputs + (past_key_value,)
386
+ return outputs
387
+
388
+
389
+ class EsmSelfOutput(nn.Module):
390
+ def __init__(self, config):
391
+ super().__init__()
392
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
393
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
394
+
395
+ def forward(self, hidden_states, input_tensor):
396
+ hidden_states = self.dense(hidden_states)
397
+ hidden_states = self.dropout(hidden_states)
398
+ hidden_states = hidden_states + input_tensor
399
+ return hidden_states
400
+
401
+
402
+ class EsmAttention(nn.Module):
403
+ def __init__(self, config):
404
+ super().__init__()
405
+ self.self = EsmSelfAttention(config)
406
+ self.output = EsmSelfOutput(config)
407
+ self.pruned_heads = set()
408
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
409
+
410
+ def prune_heads(self, heads):
411
+ if len(heads) == 0:
412
+ return
413
+ heads, index = find_pruneable_heads_and_indices(
414
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
415
+ )
416
+
417
+ # Prune linear layers
418
+ self.self.query = prune_linear_layer(self.self.query, index)
419
+ self.self.key = prune_linear_layer(self.self.key, index)
420
+ self.self.value = prune_linear_layer(self.self.value, index)
421
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
422
+
423
+ # Update hyper params and store pruned heads
424
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
425
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
426
+ self.pruned_heads = self.pruned_heads.union(heads)
427
+
428
+ def forward(
429
+ self,
430
+ hidden_states,
431
+ attention_mask=None,
432
+ head_mask=None,
433
+ encoder_hidden_states=None,
434
+ encoder_attention_mask=None,
435
+ past_key_value=None,
436
+ output_attentions=False,
437
+ ):
438
+ hidden_states_ln = self.LayerNorm(hidden_states)
439
+ self_outputs = self.self(
440
+ hidden_states_ln,
441
+ attention_mask,
442
+ head_mask,
443
+ encoder_hidden_states,
444
+ encoder_attention_mask,
445
+ past_key_value,
446
+ output_attentions,
447
+ )
448
+ attention_output = self.output(self_outputs[0], hidden_states)
449
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
450
+ return outputs
451
+
452
+
453
+ class EsmIntermediate(nn.Module):
454
+ def __init__(self, config):
455
+ super().__init__()
456
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
457
+
458
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
459
+ hidden_states = self.dense(hidden_states)
460
+ hidden_states = gelu(hidden_states)
461
+ return hidden_states
462
+
463
+
464
+ class EsmOutput(nn.Module):
465
+ def __init__(self, config):
466
+ super().__init__()
467
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
468
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
469
+
470
+ def forward(self, hidden_states, input_tensor):
471
+ hidden_states = self.dense(hidden_states)
472
+ hidden_states = self.dropout(hidden_states)
473
+ hidden_states = hidden_states + input_tensor
474
+ return hidden_states
475
+
476
+
477
+ class EsmLayer(nn.Module):
478
+ def __init__(self, config):
479
+ super().__init__()
480
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
481
+ self.seq_len_dim = 1
482
+ self.attention = EsmAttention(config)
483
+ self.is_decoder = config.is_decoder
484
+ self.add_cross_attention = config.add_cross_attention
485
+ if self.add_cross_attention:
486
+ if not self.is_decoder:
487
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
488
+ self.crossattention = EsmAttention(config)
489
+ self.intermediate = EsmIntermediate(config)
490
+ self.output = EsmOutput(config)
491
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
492
+
493
+ def forward(
494
+ self,
495
+ hidden_states,
496
+ attention_mask=None,
497
+ head_mask=None,
498
+ encoder_hidden_states=None,
499
+ encoder_attention_mask=None,
500
+ past_key_value=None,
501
+ output_attentions=False,
502
+ ):
503
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
504
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
505
+ self_attention_outputs = self.attention(
506
+ hidden_states,
507
+ attention_mask,
508
+ head_mask,
509
+ output_attentions=output_attentions,
510
+ past_key_value=self_attn_past_key_value,
511
+ )
512
+ attention_output = self_attention_outputs[0]
513
+
514
+ # if decoder, the last output is tuple of self-attn cache
515
+ if self.is_decoder:
516
+ outputs = self_attention_outputs[1:-1]
517
+ present_key_value = self_attention_outputs[-1]
518
+ else:
519
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
520
+
521
+ cross_attn_present_key_value = None
522
+ if self.is_decoder and encoder_hidden_states is not None:
523
+ if not hasattr(self, "crossattention"):
524
+ raise AttributeError(
525
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
526
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
527
+ )
528
+
529
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
530
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
531
+ cross_attention_outputs = self.crossattention(
532
+ attention_output,
533
+ attention_mask,
534
+ head_mask,
535
+ encoder_hidden_states,
536
+ encoder_attention_mask,
537
+ cross_attn_past_key_value,
538
+ output_attentions,
539
+ )
540
+ attention_output = cross_attention_outputs[0]
541
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
542
+
543
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
544
+ cross_attn_present_key_value = cross_attention_outputs[-1]
545
+ present_key_value = present_key_value + cross_attn_present_key_value
546
+
547
+ layer_output = self.feed_forward_chunk(attention_output)
548
+
549
+ outputs = (layer_output,) + outputs
550
+
551
+ # if decoder, return the attn key/values as the last output
552
+ if self.is_decoder:
553
+ outputs = outputs + (present_key_value,)
554
+ return outputs
555
+
556
+ def feed_forward_chunk(self, attention_output):
557
+ attention_output_ln = self.LayerNorm(attention_output)
558
+ intermediate_output = self.intermediate(attention_output_ln)
559
+ layer_output = self.output(intermediate_output, attention_output)
560
+ return layer_output
561
+
562
+
563
+ class EsmEncoder(nn.Module):
564
+ def __init__(self, config):
565
+ super().__init__()
566
+ self.config = config
567
+ self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)])
568
+ self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
569
+ self.gradient_checkpointing = False
570
+
571
+ def forward(
572
+ self,
573
+ hidden_states,
574
+ attention_mask=None,
575
+ head_mask=None,
576
+ encoder_hidden_states=None,
577
+ encoder_attention_mask=None,
578
+ past_key_values=None,
579
+ use_cache=None,
580
+ output_attentions=False,
581
+ output_hidden_states=False,
582
+ return_dict=True,
583
+ ):
584
+ if self.gradient_checkpointing and self.training:
585
+ if use_cache:
586
+ logger.warning_once(
587
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
588
+ "`use_cache=False`..."
589
+ )
590
+ use_cache = False
591
+ all_hidden_states = () if output_hidden_states else None
592
+ all_self_attentions = () if output_attentions else None
593
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
594
+
595
+ next_decoder_cache = () if use_cache else None
596
+ for i, layer_module in enumerate(self.layer):
597
+ if output_hidden_states:
598
+ all_hidden_states = all_hidden_states + (hidden_states,)
599
+
600
+ layer_head_mask = head_mask[i] if head_mask is not None else None
601
+ past_key_value = past_key_values[i] if past_key_values is not None else None
602
+
603
+ if self.gradient_checkpointing and self.training:
604
+ layer_outputs = self._gradient_checkpointing_func(
605
+ layer_module.__call__,
606
+ hidden_states,
607
+ attention_mask,
608
+ layer_head_mask,
609
+ encoder_hidden_states,
610
+ encoder_attention_mask,
611
+ past_key_value,
612
+ output_attentions,
613
+ )
614
+ else:
615
+ layer_outputs = layer_module(
616
+ hidden_states,
617
+ attention_mask,
618
+ layer_head_mask,
619
+ encoder_hidden_states,
620
+ encoder_attention_mask,
621
+ past_key_value,
622
+ output_attentions,
623
+ )
624
+
625
+ hidden_states = layer_outputs[0]
626
+ if use_cache:
627
+ next_decoder_cache = next_decoder_cache + (layer_outputs[-1],)
628
+ if output_attentions:
629
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
630
+ if self.config.add_cross_attention:
631
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
632
+
633
+ if self.emb_layer_norm_after:
634
+ hidden_states = self.emb_layer_norm_after(hidden_states)
635
+
636
+ if output_hidden_states:
637
+ all_hidden_states = all_hidden_states + (hidden_states,)
638
+
639
+ if not return_dict:
640
+ return tuple(
641
+ v
642
+ for v in [
643
+ hidden_states,
644
+ next_decoder_cache,
645
+ all_hidden_states,
646
+ all_self_attentions,
647
+ all_cross_attentions,
648
+ ]
649
+ if v is not None
650
+ )
651
+ return BaseModelOutputWithPastAndCrossAttentions(
652
+ last_hidden_state=hidden_states,
653
+ past_key_values=next_decoder_cache,
654
+ hidden_states=all_hidden_states,
655
+ attentions=all_self_attentions,
656
+ cross_attentions=all_cross_attentions,
657
+ )
658
+
659
+
660
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
661
+ class EsmPooler(nn.Module):
662
+ def __init__(self, config):
663
+ super().__init__()
664
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
665
+ self.activation = nn.Tanh()
666
+
667
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
668
+ # We "pool" the model by simply taking the hidden state corresponding
669
+ # to the first token.
670
+ first_token_tensor = hidden_states[:, 0]
671
+ pooled_output = self.dense(first_token_tensor)
672
+ pooled_output = self.activation(pooled_output)
673
+ return pooled_output
674
+
675
+
676
+ class EsmPreTrainedModel(PreTrainedModel):
677
+ """
678
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
679
+ models.
680
+ """
681
+
682
+ config_class = EsmConfig
683
+ base_model_prefix = "esm"
684
+ supports_gradient_checkpointing = True
685
+ _no_split_modules = ["EsmLayer", "EsmFoldTriangularSelfAttentionBlock", "EsmEmbeddings"]
686
+
687
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
688
+ def _init_weights(self, module):
689
+ """Initialize the weights"""
690
+ if isinstance(module, nn.Linear):
691
+ # Slightly different from the TF version which uses truncated_normal for initialization
692
+ # cf https://github.com/pytorch/pytorch/pull/5617
693
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
694
+ if module.bias is not None:
695
+ module.bias.data.zero_()
696
+ elif isinstance(module, nn.Embedding):
697
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
698
+ if module.padding_idx is not None:
699
+ module.weight.data[module.padding_idx].zero_()
700
+ elif isinstance(module, nn.LayerNorm):
701
+ module.bias.data.zero_()
702
+ module.weight.data.fill_(1.0)
703
+
704
+
705
+ ESM_START_DOCSTRING = r"""
706
+
707
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
708
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
709
+ etc.)
710
+
711
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
712
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
713
+ and behavior.
714
+
715
+ Parameters:
716
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
717
+ model. Initializing with a config file does not load the weights associated with the model, only the
718
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
719
+ """
720
+
721
+ ESM_INPUTS_DOCSTRING = r"""
722
+ Args:
723
+ input_ids (`torch.LongTensor` of shape `({0})`):
724
+ Indices of input sequence tokens in the vocabulary.
725
+
726
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
727
+ [`PreTrainedTokenizer.__call__`] for details.
728
+
729
+ [What are input IDs?](../glossary#input-ids)
730
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
731
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
732
+
733
+ - 1 for tokens that are **not masked**,
734
+ - 0 for tokens that are **masked**.
735
+
736
+ [What are attention masks?](../glossary#attention-mask)
737
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
738
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
739
+ config.max_position_embeddings - 1]`.
740
+
741
+ [What are position IDs?](../glossary#position-ids)
742
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
743
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
744
+
745
+ - 1 indicates the head is **not masked**,
746
+ - 0 indicates the head is **masked**.
747
+
748
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
749
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
750
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
751
+ model's internal embedding lookup matrix.
752
+ output_attentions (`bool`, *optional*):
753
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
754
+ tensors for more detail.
755
+ output_hidden_states (`bool`, *optional*):
756
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
757
+ more detail.
758
+ return_dict (`bool`, *optional*):
759
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
760
+ """
761
+
762
+
763
+ @add_start_docstrings(
764
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
765
+ ESM_START_DOCSTRING,
766
+ )
767
+ class EsmModel(EsmPreTrainedModel):
768
+ """
769
+
770
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
771
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
772
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
773
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
774
+
775
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
776
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
777
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
778
+ """
779
+
780
+ def __init__(self, config, add_pooling_layer=True):
781
+ super().__init__(config)
782
+ self.config = config
783
+
784
+ self.embeddings = EsmEmbeddings(config)
785
+ self.encoder = EsmEncoder(config)
786
+
787
+ self.pooler = EsmPooler(config) if add_pooling_layer else None
788
+
789
+ self.contact_head = EsmContactPredictionHead(
790
+ in_features=config.num_hidden_layers * config.num_attention_heads, bias=True
791
+ )
792
+
793
+ # Initialize weights and apply final processing
794
+ self.post_init()
795
+
796
+ def get_input_embeddings(self):
797
+ return self.embeddings.word_embeddings
798
+
799
+ def set_input_embeddings(self, value):
800
+ self.embeddings.word_embeddings = value
801
+
802
+ def _prune_heads(self, heads_to_prune):
803
+ """
804
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
805
+ class PreTrainedModel
806
+ """
807
+ for layer, heads in heads_to_prune.items():
808
+ self.encoder.layer[layer].attention.prune_heads(heads)
809
+
810
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
811
+ @add_code_sample_docstrings(
812
+ checkpoint=_CHECKPOINT_FOR_DOC,
813
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
814
+ config_class=_CONFIG_FOR_DOC,
815
+ )
816
+ def forward(
817
+ self,
818
+ input_ids: Optional[torch.Tensor] = None,
819
+ attention_mask: Optional[torch.Tensor] = None,
820
+ position_ids: Optional[torch.Tensor] = None,
821
+ head_mask: Optional[torch.Tensor] = None,
822
+ inputs_embeds: Optional[torch.Tensor] = None,
823
+ encoder_hidden_states: Optional[torch.Tensor] = None,
824
+ encoder_attention_mask: Optional[torch.Tensor] = None,
825
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
826
+ use_cache: Optional[bool] = None,
827
+ output_attentions: Optional[bool] = None,
828
+ output_hidden_states: Optional[bool] = None,
829
+ return_dict: Optional[bool] = None,
830
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
831
+ r"""
832
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
833
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
834
+ the model is configured as a decoder.
835
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
836
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
837
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
838
+
839
+ - 1 for tokens that are **not masked**,
840
+ - 0 for tokens that are **masked**.
841
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
842
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
843
+
844
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
845
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
846
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
847
+ use_cache (`bool`, *optional*):
848
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
849
+ `past_key_values`).
850
+ """
851
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
852
+ output_hidden_states = (
853
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
854
+ )
855
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
856
+
857
+ if self.config.is_decoder:
858
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
859
+ else:
860
+ use_cache = False
861
+
862
+ if input_ids is not None and inputs_embeds is not None:
863
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
864
+ elif input_ids is not None:
865
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
866
+ input_shape = input_ids.size()
867
+ elif inputs_embeds is not None:
868
+ input_shape = inputs_embeds.size()[:-1]
869
+ else:
870
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
871
+
872
+ batch_size, seq_length = input_shape
873
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
874
+
875
+ # past_key_values_length
876
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
877
+
878
+ if attention_mask is None:
879
+ attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
880
+
881
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
882
+ # ourselves in which case we just need to make it broadcastable to all heads.
883
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
884
+
885
+ # If a 2D or 3D attention mask is provided for the cross-attention
886
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
887
+ if self.config.is_decoder and encoder_hidden_states is not None:
888
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
889
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
890
+ if encoder_attention_mask is None:
891
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
892
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
893
+ else:
894
+ encoder_extended_attention_mask = None
895
+
896
+ # Prepare head mask if needed
897
+ # 1.0 in head_mask indicate we keep the head
898
+ # attention_probs has shape bsz x n_heads x N x N
899
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
900
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
901
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
902
+
903
+ embedding_output = self.embeddings(
904
+ input_ids=input_ids,
905
+ position_ids=position_ids,
906
+ attention_mask=attention_mask,
907
+ inputs_embeds=inputs_embeds,
908
+ past_key_values_length=past_key_values_length,
909
+ )
910
+ encoder_outputs = self.encoder(
911
+ embedding_output,
912
+ attention_mask=extended_attention_mask,
913
+ head_mask=head_mask,
914
+ encoder_hidden_states=encoder_hidden_states,
915
+ encoder_attention_mask=encoder_extended_attention_mask,
916
+ past_key_values=past_key_values,
917
+ use_cache=use_cache,
918
+ output_attentions=output_attentions,
919
+ output_hidden_states=output_hidden_states,
920
+ return_dict=return_dict,
921
+ )
922
+ sequence_output = encoder_outputs[0]
923
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
924
+
925
+ if not return_dict:
926
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
927
+
928
+ return BaseModelOutputWithPoolingAndCrossAttentions(
929
+ last_hidden_state=sequence_output,
930
+ pooler_output=pooled_output,
931
+ past_key_values=encoder_outputs.past_key_values,
932
+ hidden_states=encoder_outputs.hidden_states,
933
+ attentions=encoder_outputs.attentions,
934
+ cross_attentions=encoder_outputs.cross_attentions,
935
+ )
936
+
937
+ def predict_contacts(self, tokens, attention_mask):
938
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
939
+ attns = torch.stack(attns, dim=1) # Matches the original model layout
940
+ # In the original model, attentions for padding tokens are completely zeroed out.
941
+ # This makes no difference most of the time because the other tokens won't attend to them,
942
+ # but it does for the contact prediction task, which takes attentions as input,
943
+ # so we have to mimic that here.
944
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3)
945
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4)
946
+ return self.contact_head(tokens, attns)
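A hedged end-to-end sketch (not part of the uploaded file) of contact prediction with the checkpoint named in `_CHECKPOINT_FOR_DOC`; the protein sequence is an arbitrary example:

import torch
from transformers import AutoTokenizer, EsmModel

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="pt")
with torch.no_grad():
    contacts = model.predict_contacts(inputs["input_ids"], inputs["attention_mask"])
print(contacts.shape)  # (batch, seq_len - 2, seq_len - 2): cls/eos positions are dropped by the head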
947
+
948
+
949
+ @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
950
+ class EsmForMaskedLM(EsmPreTrainedModel):
951
+ _tied_weights_keys = ["lm_head.decoder.weight"]
952
+
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+
956
+ if config.is_decoder:
957
+ logger.warning(
958
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
959
+ "bi-directional self-attention."
960
+ )
961
+
962
+ self.esm = EsmModel(config, add_pooling_layer=False)
963
+ self.lm_head = EsmLMHead(config)
964
+
965
+ self.init_weights()
966
+
967
+ def get_output_embeddings(self):
968
+ return self.lm_head.decoder
969
+
970
+ def set_output_embeddings(self, new_embeddings):
971
+ self.lm_head.decoder = new_embeddings
972
+
973
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
974
+ @add_code_sample_docstrings(
975
+ checkpoint=_CHECKPOINT_FOR_DOC,
976
+ output_type=MaskedLMOutput,
977
+ config_class=_CONFIG_FOR_DOC,
978
+ mask="<mask>",
979
+ )
980
+ def forward(
981
+ self,
982
+ input_ids: Optional[torch.LongTensor] = None,
983
+ attention_mask: Optional[torch.Tensor] = None,
984
+ position_ids: Optional[torch.LongTensor] = None,
985
+ head_mask: Optional[torch.Tensor] = None,
986
+ inputs_embeds: Optional[torch.FloatTensor] = None,
987
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
988
+ encoder_attention_mask: Optional[torch.Tensor] = None,
989
+ labels: Optional[torch.LongTensor] = None,
990
+ output_attentions: Optional[bool] = None,
991
+ output_hidden_states: Optional[bool] = None,
992
+ return_dict: Optional[bool] = None,
993
+ ) -> Union[Tuple, MaskedLMOutput]:
994
+ r"""
995
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
996
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
997
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
998
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
999
+ kwargs (`Dict[str, Any]`, *optional*, defaults to `{}`):
1000
+ Used to hide legacy arguments that have been deprecated.
1001
+ """
1002
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1003
+
1004
+ outputs = self.esm(
1005
+ input_ids,
1006
+ attention_mask=attention_mask,
1007
+ position_ids=position_ids,
1008
+ head_mask=head_mask,
1009
+ inputs_embeds=inputs_embeds,
1010
+ encoder_hidden_states=encoder_hidden_states,
1011
+ encoder_attention_mask=encoder_attention_mask,
1012
+ output_attentions=output_attentions,
1013
+ output_hidden_states=output_hidden_states,
1014
+ return_dict=return_dict,
1015
+ )
1016
+ sequence_output = outputs[0]
1017
+ prediction_scores = self.lm_head(sequence_output)
1018
+
1019
+ masked_lm_loss = None
1020
+ if labels is not None:
1021
+ loss_fct = CrossEntropyLoss()
1022
+
1023
+ labels = labels.to(prediction_scores.device)
1024
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1025
+
1026
+ if not return_dict:
1027
+ output = (prediction_scores,) + outputs[2:]
1028
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1029
+
1030
+ return MaskedLMOutput(
1031
+ loss=masked_lm_loss,
1032
+ logits=prediction_scores,
1033
+ hidden_states=outputs.hidden_states,
1034
+ attentions=outputs.attentions,
1035
+ )
1036
+
1037
+ def predict_contacts(self, tokens, attention_mask):
1038
+ return self.esm.predict_contacts(tokens, attention_mask=attention_mask)
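A usage sketch (not part of the uploaded file) for the masked-LM head, using the `<mask>` token referenced in the docstring decorator above; the sequence is an arbitrary example:

import torch
from transformers import AutoTokenizer, EsmForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
inputs = tokenizer("MKTAYIAKQRQISF<mask>KSHFSRQLEERLGLIEVQ", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))  # most likely residue at the masked site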
1039
+
1040
+
1041
+ class EsmLMHead(nn.Module):
1042
+ """ESM Head for masked language modeling."""
1043
+
1044
+ def __init__(self, config):
1045
+ super().__init__()
1046
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1047
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1048
+
1049
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1050
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1051
+
1052
+ def forward(self, features, **kwargs):
1053
+ x = self.dense(features)
1054
+ x = gelu(x)
1055
+ x = self.layer_norm(x)
1056
+
1057
+ # project back to size of vocabulary with bias
1058
+ x = self.decoder(x) + self.bias
1059
+ return x
1060
+
1061
+
1062
+ @add_start_docstrings(
1063
+ """
1064
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1065
+ output) e.g. for GLUE tasks.
1066
+ """,
1067
+ ESM_START_DOCSTRING,
1068
+ )
1069
+ class EsmForSequenceClassification(EsmPreTrainedModel):
1070
+ def __init__(self, config):
1071
+ super().__init__(config)
1072
+ self.num_labels = config.num_labels
1073
+ self.config = config
1074
+
1075
+ self.esm = EsmModel(config, add_pooling_layer=False)
1076
+ self.classifier = EsmClassificationHead(config)
1077
+
1078
+ self.init_weights()
1079
+
1080
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1081
+ @add_code_sample_docstrings(
1082
+ checkpoint=_CHECKPOINT_FOR_DOC,
1083
+ output_type=SequenceClassifierOutput,
1084
+ config_class=_CONFIG_FOR_DOC,
1085
+ )
1086
+ def forward(
1087
+ self,
1088
+ input_ids: Optional[torch.LongTensor] = None,
1089
+ attention_mask: Optional[torch.Tensor] = None,
1090
+ position_ids: Optional[torch.LongTensor] = None,
1091
+ head_mask: Optional[torch.Tensor] = None,
1092
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1093
+ labels: Optional[torch.LongTensor] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1098
+ r"""
1099
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1100
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1101
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1102
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1103
+ """
1104
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1105
+
1106
+ outputs = self.esm(
1107
+ input_ids,
1108
+ attention_mask=attention_mask,
1109
+ position_ids=position_ids,
1110
+ head_mask=head_mask,
1111
+ inputs_embeds=inputs_embeds,
1112
+ output_attentions=output_attentions,
1113
+ output_hidden_states=output_hidden_states,
1114
+ return_dict=return_dict,
1115
+ )
1116
+ sequence_output = outputs[0]
1117
+ logits = self.classifier(sequence_output)
1118
+
1119
+ loss = None
1120
+ if labels is not None:
1121
+ labels = labels.to(logits.device)
1122
+
1123
+ if self.config.problem_type is None:
1124
+ if self.num_labels == 1:
1125
+ self.config.problem_type = "regression"
1126
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1127
+ self.config.problem_type = "single_label_classification"
1128
+ else:
1129
+ self.config.problem_type = "multi_label_classification"
1130
+
1131
+ if self.config.problem_type == "regression":
1132
+ loss_fct = MSELoss()
1133
+ if self.num_labels == 1:
1134
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1135
+ else:
1136
+ loss = loss_fct(logits, labels)
1137
+ elif self.config.problem_type == "single_label_classification":
1138
+ loss_fct = CrossEntropyLoss()
1139
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1140
+ elif self.config.problem_type == "multi_label_classification":
1141
+ loss_fct = BCEWithLogitsLoss()
1142
+ loss = loss_fct(logits, labels)
1143
+
1144
+ if not return_dict:
1145
+ output = (logits,) + outputs[2:]
1146
+ return ((loss,) + output) if loss is not None else output
1147
+
1148
+ return SequenceClassifierOutput(
1149
+ loss=loss,
1150
+ logits=logits,
1151
+ hidden_states=outputs.hidden_states,
1152
+ attentions=outputs.attentions,
1153
+ )
1154
+
1155
+
1156
+ @add_start_docstrings(
1157
+ """
1158
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1159
+ Named-Entity-Recognition (NER) tasks.
1160
+ """,
1161
+ ESM_START_DOCSTRING,
1162
+ )
1163
+ class EsmForTokenClassification(EsmPreTrainedModel):
1164
+ def __init__(self, config):
1165
+ super().__init__(config)
1166
+ self.num_labels = config.num_labels
1167
+
1168
+ self.esm = EsmModel(config, add_pooling_layer=False)
1169
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1170
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1171
+
1172
+ self.init_weights()
1173
+
1174
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1175
+ @add_code_sample_docstrings(
1176
+ checkpoint=_CHECKPOINT_FOR_DOC,
1177
+ output_type=TokenClassifierOutput,
1178
+ config_class=_CONFIG_FOR_DOC,
1179
+ )
1180
+ def forward(
1181
+ self,
1182
+ input_ids: Optional[torch.LongTensor] = None,
1183
+ attention_mask: Optional[torch.Tensor] = None,
1184
+ position_ids: Optional[torch.LongTensor] = None,
1185
+ head_mask: Optional[torch.Tensor] = None,
1186
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1187
+ labels: Optional[torch.LongTensor] = None,
1188
+ output_attentions: Optional[bool] = None,
1189
+ output_hidden_states: Optional[bool] = None,
1190
+ return_dict: Optional[bool] = None,
1191
+ ) -> Union[Tuple, TokenClassifierOutput]:
1192
+ r"""
1193
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1194
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1195
+ """
1196
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1197
+
1198
+ outputs = self.esm(
1199
+ input_ids,
1200
+ attention_mask=attention_mask,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ output_attentions=output_attentions,
1205
+ output_hidden_states=output_hidden_states,
1206
+ return_dict=return_dict,
1207
+ )
1208
+
1209
+ sequence_output = outputs[0]
1210
+
1211
+ sequence_output = self.dropout(sequence_output)
1212
+ logits = self.classifier(sequence_output)
1213
+
1214
+ loss = None
1215
+ if labels is not None:
1216
+ loss_fct = CrossEntropyLoss()
1217
+
1218
+ labels = labels.to(logits.device)
1219
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1220
+
1221
+ if not return_dict:
1222
+ output = (logits,) + outputs[2:]
1223
+ return ((loss,) + output) if loss is not None else output
1224
+
1225
+ return TokenClassifierOutput(
1226
+ loss=loss,
1227
+ logits=logits,
1228
+ hidden_states=outputs.hidden_states,
1229
+ attentions=outputs.attentions,
1230
+ )
1231
+
1232
+
1233
+ class EsmClassificationHead(nn.Module):
1234
+ """Head for sentence-level classification tasks."""
1235
+
1236
+ def __init__(self, config):
1237
+ super().__init__()
1238
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1239
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1240
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1241
+
1242
+ def forward(self, features, **kwargs):
1243
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1244
+ x = self.dropout(x)
1245
+ x = self.dense(x)
1246
+ x = torch.tanh(x)
1247
+ x = self.dropout(x)
1248
+ x = self.out_proj(x)
1249
+ return x
1250
+
1251
+
1252
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1253
+ """
1254
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1255
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1256
+
1257
+ Args:
1258
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
1259
+
1260
+ Returns: torch.Tensor
1261
+ """
1262
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1263
+ mask = input_ids.ne(padding_idx).int()
1264
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1265
+ return incremental_indices.long() + padding_idx
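A tiny sketch (not part of the uploaded file) showing the position numbering; the padding_idx of 1 and the token ids are invented for illustration:

import torch

input_ids = torch.tensor([[5, 9, 7, 1, 1]])   # 1 plays the role of padding_idx here
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 1, 1]]): real tokens count up from padding_idx + 1, padding keeps padding_idx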
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py ADDED
@@ -0,0 +1,2322 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import math
16
+ import sys
17
+ from dataclasses import dataclass
18
+ from functools import partial
19
+ from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.nn as nn
24
+ from torch.nn import LayerNorm
25
+
26
+ from ...integrations.deepspeed import is_deepspeed_available
27
+ from ...modeling_outputs import ModelOutput
28
+ from ...utils import (
29
+ ContextManagers,
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ is_scipy_available,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_esm import EsmConfig
37
+ from .modeling_esm import ESM_START_DOCSTRING, EsmModel, EsmPreTrainedModel
38
+ from .openfold_utils import (
39
+ OFProtein,
40
+ Rigid,
41
+ Rotation,
42
+ atom14_to_atom37,
43
+ chunk_layer,
44
+ compute_predicted_aligned_error,
45
+ compute_tm,
46
+ frames_and_literature_positions_to_atom14_pos,
47
+ make_atom14_masks,
48
+ residue_constants,
49
+ to_pdb,
50
+ torsion_angles_to_frames,
51
+ )
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+ _CHECKPOINT_FOR_DOC = "facebook/esmfold_v1"
56
+ _CONFIG_FOR_DOC = "EsmConfig"
57
+
58
+
59
+ @dataclass
60
+ class EsmForProteinFoldingOutput(ModelOutput):
61
+ """
62
+ Output type of [`EsmForProteinFoldingOutput`].
63
+
64
+ Args:
65
+ frames (`torch.FloatTensor`):
66
+ Output frames.
67
+ sidechain_frames (`torch.FloatTensor`):
68
+ Output sidechain frames.
69
+ unnormalized_angles (`torch.FloatTensor`):
70
+ Predicted unnormalized backbone and side chain torsion angles.
71
+ angles (`torch.FloatTensor`):
72
+ Predicted backbone and side chain torsion angles.
73
+ positions (`torch.FloatTensor`):
74
+ Predicted positions of the backbone and side chain atoms.
75
+ states (`torch.FloatTensor`):
76
+ Hidden states from the protein folding trunk.
77
+ s_s (`torch.FloatTensor`):
78
+ Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.
79
+ s_z (`torch.FloatTensor`):
80
+ Pairwise residue embeddings.
81
+ distogram_logits (`torch.FloatTensor`):
82
+ Input logits to the distogram used to compute residue distances.
83
+ lm_logits (`torch.FloatTensor`):
84
+ Logits output by the ESM-2 protein language model stem.
85
+ aatype (`torch.FloatTensor`):
86
+ Input amino acids (AlphaFold2 indices).
87
+ atom14_atom_exists (`torch.FloatTensor`):
88
+ Whether each atom exists in the atom14 representation.
89
+ residx_atom14_to_atom37 (`torch.FloatTensor`):
90
+ Mapping between atoms in the atom14 and atom37 representations.
91
+ residx_atom37_to_atom14 (`torch.FloatTensor`):
92
+ Mapping between atoms in the atom37 and atom14 representations.
93
+ atom37_atom_exists (`torch.FloatTensor`):
94
+ Whether each atom exists in the atom37 representation.
95
+ residue_index (`torch.FloatTensor`):
96
+ The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be
97
+ a sequence of integers from 0 to `sequence_length`.
98
+ lddt_head (`torch.FloatTensor`):
99
+ Raw outputs from the lddt head used to compute plddt.
100
+ plddt (`torch.FloatTensor`):
101
+ Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is
102
+ uncertain, or where the protein structure is disordered.
103
+ ptm_logits (`torch.FloatTensor`):
104
+ Raw logits used for computing ptm.
105
+ ptm (`torch.FloatTensor`):
106
+ TM-score output representing the model's high-level confidence in the overall structure.
107
+ aligned_confidence_probs (`torch.FloatTensor`):
108
+ Per-residue confidence scores for the aligned structure.
109
+ predicted_aligned_error (`torch.FloatTensor`):
110
+ Predicted error between the model's prediction and the ground truth.
111
+ max_predicted_aligned_error (`torch.FloatTensor`):
112
+ Per-sample maximum predicted error.
113
+ """
114
+
115
+ frames: torch.FloatTensor = None
116
+ sidechain_frames: torch.FloatTensor = None
117
+ unnormalized_angles: torch.FloatTensor = None
118
+ angles: torch.FloatTensor = None
119
+ positions: torch.FloatTensor = None
120
+ states: torch.FloatTensor = None
121
+ s_s: torch.FloatTensor = None
122
+ s_z: torch.FloatTensor = None
123
+ distogram_logits: torch.FloatTensor = None
124
+ lm_logits: torch.FloatTensor = None
125
+ aatype: torch.FloatTensor = None
126
+ atom14_atom_exists: torch.FloatTensor = None
127
+ residx_atom14_to_atom37: torch.FloatTensor = None
128
+ residx_atom37_to_atom14: torch.FloatTensor = None
129
+ atom37_atom_exists: torch.FloatTensor = None
130
+ residue_index: torch.FloatTensor = None
131
+ lddt_head: torch.FloatTensor = None
132
+ plddt: torch.FloatTensor = None
133
+ ptm_logits: torch.FloatTensor = None
134
+ ptm: torch.FloatTensor = None
135
+ aligned_confidence_probs: torch.FloatTensor = None
136
+ predicted_aligned_error: torch.FloatTensor = None
137
+ max_predicted_aligned_error: torch.FloatTensor = None
138
+
139
+
140
+ ESMFOLD_INPUTS_DOCSTRING = r"""
141
+ Args:
142
+ input_ids (`torch.LongTensor` of shape `({0})`):
143
+ Indices of input sequence tokens in the vocabulary.
144
+
145
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
146
+ [`PreTrainedTokenizer.__call__`] for details.
147
+
148
+ [What are input IDs?](../glossary#input-ids)
149
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
150
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
151
+
152
+ - 1 for tokens that are **not masked**,
153
+ - 0 for tokens that are **masked**.
154
+
155
+ [What are attention masks?](../glossary#attention-mask)
156
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
157
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
158
+ config.max_position_embeddings - 1]`.
159
+
160
+ [What are position IDs?](../glossary#position-ids)
161
+ masking_pattern (`torch.LongTensor` of shape `({0})`, *optional*):
162
+ Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
163
+ num_recycles (`int`, *optional*, defaults to `None`):
164
+ Number of times to recycle the input sequence. If `None`, defaults to `config.max_recycles`. "Recycling"
165
+ consists of passing the output of the folding trunk back in as input to the trunk. During training, the
166
+ number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
167
+ after each recycle. During inference, num_recycles should be set to the highest value that the model was
168
+ trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
169
+ used.
170
+ """
171
+
172
+
173
+ def is_fp16_enabled():
174
+ # Autocast world
175
+ fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16
176
+ fp16_enabled = fp16_enabled and torch.is_autocast_enabled()
177
+
178
+ return fp16_enabled
179
+
180
+
181
+ def is_deepspeed_initialized():
182
+ if is_deepspeed_available():
183
+ return False
184
+ else:
185
+ try:
186
+ import deepspeed
187
+
188
+ # This is not available in all DeepSpeed versions.
189
+ return deepspeed.utils.is_initialized()
190
+ except Exception:
191
+ return False
192
+
193
+
194
+ def collate_dense_tensors(samples: List[torch.Tensor], pad_v: float = 0) -> torch.Tensor:
195
+ """
196
+ Takes a list of tensors with the following dimensions:
197
+ [(d_11, ..., d_1K),
198
+ (d_21, ..., d_2K), ..., (d_N1, ..., d_NK)]
199
+ and stacks + pads them into a single tensor of shape:
200
+ (N, max_i d_i1, ..., max_i d_iK)
201
+ """
202
+ if len(samples) == 0:
203
+ return torch.Tensor()
204
+ if len({x.dim() for x in samples}) != 1:
205
+ raise RuntimeError(f"Samples has varying dimensions: {[x.dim() for x in samples]}")
206
+ (device,) = tuple({x.device for x in samples}) # assumes all on same device
207
+ max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])]
208
+ result = torch.empty(len(samples), *max_shape, dtype=samples[0].dtype, device=device)
209
+ result.fill_(pad_v)
210
+ for i in range(len(samples)):
211
+ result_i = result[i]
212
+ t = samples[i]
213
+ result_i[tuple(slice(0, k) for k in t.shape)] = t
214
+ return result
215
+
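+ # Illustrative usage sketch (not part of the original file): collate_dense_tensors pads a ragged
+ # batch of same-rank tensors up to their per-dimension maxima, e.g.
+ #   batch = [torch.ones(2, 3), torch.ones(4, 3)]
+ #   padded = collate_dense_tensors(batch, pad_v=0)  # shape (2, 4, 3); the shorter entry is zero-padded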
216
+
217
+ def flatten_final_dims(t: torch.Tensor, no_dims: int):
218
+ return t.reshape(t.shape[:-no_dims] + (-1,))
219
+
220
+
221
+ def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
222
+ zero_index = -1 * len(inds)
223
+ first_inds = list(range(len(tensor.shape[:zero_index])))
224
+ return tensor.permute(first_inds + [zero_index + i for i in inds])
225
+
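+ # Shape sketch for the two helpers above (illustrative values only):
+ #   t = torch.randn(2, 5, 8, 4)
+ #   flatten_final_dims(t, 2).shape           # (2, 5, 32): the last two dims are merged
+ #   permute_final_dims(t, (2, 0, 1)).shape   # (2, 4, 5, 8): only the final dims are reordered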
226
+
227
+ def dict_multimap(fn, dicts):
228
+ first = dicts[0]
229
+ new_dict = {}
230
+ for k, v in first.items():
231
+ all_v = [d[k] for d in dicts]
232
+ if isinstance(v, dict):
233
+ new_dict[k] = dict_multimap(fn, all_v)
234
+ else:
235
+ new_dict[k] = fn(all_v)
236
+
237
+ return new_dict
238
+
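+ # Sketch (illustrative): dict_multimap applies `fn` leaf-wise across a list of (possibly nested) dicts:
+ #   dict_multimap(torch.stack, [{"a": torch.zeros(2)}, {"a": torch.ones(2)}])
+ #   # -> {"a": tensor of shape (2, 2)}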
239
+
240
+ def trunc_normal_init_(weights, scale=1.0, fan="fan_in"):
241
+ shape = weights.shape
242
+ scale = scale / max(1, shape[1])
243
+
244
+ if not is_scipy_available():
245
+ logger.warning(
246
+ "This init requires scipy, but scipy was not found, default to an approximation that might not be"
247
+ " equivalent."
248
+ )
249
+ std = math.sqrt(scale)
250
+ torch.nn.init.normal_(weights, std=std).clamp(min=0.0, max=2.0 * std)
251
+
252
+ else:
253
+ from scipy.stats import truncnorm
254
+
255
+ std = math.sqrt(scale) / truncnorm.std(a=-2, b=2, loc=0, scale=1)
256
+ samples = truncnorm.rvs(a=-2, b=2, loc=0, scale=std, size=weights.numel())
257
+ samples = np.reshape(samples, shape)
258
+ weights.copy_(torch.tensor(samples, device=weights.device))
259
+
260
+
261
+ def ipa_point_weights_init_(weights):
262
+ with torch.no_grad():
263
+ softplus_inverse_1 = 0.541324854612918
264
+ weights.fill_(softplus_inverse_1)
265
+
266
+
267
+ class EsmFoldLinear(nn.Linear):
268
+ """
269
+ A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
270
+
271
+ Implements the initializers in Section 1.11.4 of the AlphaFold 2 supplement, plus some additional ones found in the code.
272
+ """
273
+
274
+ def __init__(
275
+ self,
276
+ in_dim: int,
277
+ out_dim: int,
278
+ bias: bool = True,
279
+ init: str = "default",
280
+ init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,
281
+ ):
282
+ """
283
+ Args:
284
+ in_dim:
285
+ The final dimension of inputs to the layer
286
+ out_dim:
287
+ The final dimension of layer outputs
288
+ bias:
289
+ Whether to learn an additive bias. True by default
290
+ init:
291
+ The initializer to use. Choose from:
292
+
293
+ "default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
294
+ distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
295
+ Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
296
+
297
+ Overridden by init_fn if the latter is not None.
298
+ init_fn:
299
+ A custom initializer taking weight and bias as inputs. Overrides init if not None.
300
+ """
301
+ super().__init__(in_dim, out_dim, bias=bias)
302
+
303
+ if bias:
304
+ with torch.no_grad():
305
+ self.bias.fill_(0)
306
+ self.init = init
307
+ self.init_fn = init_fn
308
+
309
+ if init not in ["default", "relu", "glorot", "gating", "normal", "final"]:
310
+ raise ValueError("Invalid init string.")
311
+
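+ # Usage sketch (illustrative): EsmFoldLinear behaves exactly like nn.Linear at call time; the
+ # `init` string is only consumed later, by EsmFoldPreTrainedModel._init_weights, to pick the
+ # (re)initialization scheme, e.g.
+ #   proj = EsmFoldLinear(128, 64, init="relu")   # He-style truncated-normal init when weights are initialized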
312
+
313
+ class EsmFoldLayerNorm(nn.Module):
314
+ def __init__(self, c_in, eps=1e-5):
315
+ super().__init__()
316
+
317
+ self.c_in = (c_in,)
318
+ self.eps = eps
319
+
320
+ self.weight = nn.Parameter(torch.ones(c_in))
321
+ self.bias = nn.Parameter(torch.zeros(c_in))
322
+
323
+ def forward(self, x):
324
+ d = x.dtype
325
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
326
+ with torch.cuda.amp.autocast(enabled=False):
327
+ out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps)
328
+ else:
329
+ out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps)
330
+
331
+ return out
332
+
333
+
334
+ @torch.jit.ignore
335
+ def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor:
336
+ """
337
+ Softmax, but without automatic casting to fp32 when the input is of type bfloat16
338
+ """
339
+ d = t.dtype
340
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
341
+ with torch.cuda.amp.autocast(enabled=False):
342
+ s = torch.nn.functional.softmax(t, dim=dim)
343
+ else:
344
+ s = torch.nn.functional.softmax(t, dim=dim)
345
+
346
+ return s
347
+
348
+
349
+ class EsmFoldAttention(nn.Module):
350
+ """
351
+ Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
352
+ """
353
+
354
+ def __init__(
355
+ self,
356
+ c_q: int,
357
+ c_k: int,
358
+ c_v: int,
359
+ c_hidden: int,
360
+ no_heads: int,
361
+ gating: bool = True,
362
+ ):
363
+ """
364
+ Args:
365
+ c_q:
366
+ Input dimension of query data
367
+ c_k:
368
+ Input dimension of key data
369
+ c_v:
370
+ Input dimension of value data
371
+ c_hidden:
372
+ Per-head hidden dimension
373
+ no_heads:
374
+ Number of attention heads
375
+ gating:
376
+ Whether the output should be gated using query data
377
+ """
378
+ super().__init__()
379
+
380
+ self.c_q = c_q
381
+ self.c_k = c_k
382
+ self.c_v = c_v
383
+ self.c_hidden = c_hidden
384
+ self.no_heads = no_heads
385
+ self.gating = gating
386
+
387
+ # DISCREPANCY: c_hidden is not the per-head channel dimension, as
388
+ # stated in the supplement, but the overall channel dimension.
389
+
390
+ self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot")
391
+ self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot")
392
+ self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot")
393
+ self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init="final")
394
+
395
+ self.linear_g = None
396
+ if self.gating:
397
+ self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init="gating")
398
+
399
+ self.sigmoid = nn.Sigmoid()
400
+
401
+ def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
402
+ # [*, Q/K/V, H * C_hidden]
403
+ q = self.linear_q(q_x)
404
+ k = self.linear_k(kv_x)
405
+ v = self.linear_v(kv_x)
406
+
407
+ # [*, Q/K, H, C_hidden]
408
+ q = q.view(q.shape[:-1] + (self.no_heads, -1))
409
+ k = k.view(k.shape[:-1] + (self.no_heads, -1))
410
+ v = v.view(v.shape[:-1] + (self.no_heads, -1))
411
+
412
+ # [*, H, Q/K, C_hidden]
413
+ q = q.transpose(-2, -3)
414
+ k = k.transpose(-2, -3)
415
+ v = v.transpose(-2, -3)
416
+
417
+ q /= math.sqrt(self.c_hidden)
418
+
419
+ return q, k, v
420
+
421
+ def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
422
+ if self.linear_g is not None:
423
+ g = self.sigmoid(self.linear_g(q_x))
424
+
425
+ # [*, Q, H, C_hidden]
426
+ g = g.view(g.shape[:-1] + (self.no_heads, -1))
427
+ o = o * g
428
+
429
+ # [*, Q, H * C_hidden]
430
+ o = flatten_final_dims(o, 2)
431
+
432
+ # [*, Q, C_q]
433
+ o = self.linear_o(o)
434
+
435
+ return o
436
+
437
+ def forward(
438
+ self,
439
+ q_x: torch.Tensor,
440
+ kv_x: torch.Tensor,
441
+ biases: Optional[List[torch.Tensor]] = None,
442
+ use_memory_efficient_kernel: bool = False,
443
+ use_lma: bool = False,
444
+ lma_q_chunk_size: int = 1024,
445
+ lma_kv_chunk_size: int = 4096,
446
+ use_flash: bool = False,
447
+ flash_mask: Optional[torch.Tensor] = None,
448
+ ) -> torch.Tensor:
449
+ """
450
+ Args:
451
+ q_x:
452
+ [*, Q, C_q] query data
453
+ kv_x:
454
+ [*, K, C_k] key data
455
+ biases:
456
+ List of biases that broadcast to [*, H, Q, K]
457
+ use_memory_efficient_kernel:
458
+ Whether to use a custom memory-efficient attention kernel. This should be the default choice for most cases.
459
+ If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
460
+ use_lma:
461
+ Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
462
+ stock PyTorch implementation is used instead
463
+ lma_q_chunk_size:
464
+ Query chunk size (for LMA)
465
+ lma_kv_chunk_size:
466
+ Key/Value chunk size (for LMA)
467
+ Returns:
468
+ [*, Q, C_q] attention update
469
+ """
470
+ if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None):
471
+ raise ValueError("If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided")
472
+
473
+ if use_flash and biases is not None:
474
+ raise ValueError("use_flash is incompatible with the bias option. For masking, use flash_mask instead")
475
+
476
+ attn_options = [use_memory_efficient_kernel, use_lma, use_flash]
477
+ if sum(attn_options) > 1:
478
+ raise ValueError("Choose at most one alternative attention algorithm")
479
+
480
+ if biases is None:
481
+ biases = []
482
+
483
+ # [*, H, Q/K, C_hidden]
484
+ query, key, value = self._prep_qkv(q_x, kv_x)
485
+ key = permute_final_dims(key, (1, 0))
486
+
487
+ # [*, H, Q, K]
488
+ output = torch.matmul(query, key)
489
+ for b in biases:
490
+ output += b
491
+ output = softmax_no_cast(output, -1)
492
+
493
+ # [*, H, Q, C_hidden]
494
+ output = torch.matmul(output, value)
495
+ output = output.transpose(-2, -3)
496
+ output = self._wrap_up(output, q_x)
497
+
498
+ return output
499
+
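+ # Usage sketch for EsmFoldAttention (hypothetical sizes, not from the original file):
+ #   attn = EsmFoldAttention(c_q=64, c_k=64, c_v=64, c_hidden=16, no_heads=4)
+ #   q_x, kv_x = torch.randn(1, 10, 64), torch.randn(1, 12, 64)
+ #   bias = torch.zeros(1, 4, 10, 12)        # broadcasts to [*, H, Q, K]
+ #   out = attn(q_x, kv_x, biases=[bias])    # -> [1, 10, 64]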
500
+
501
+ class EsmFoldTriangleAttention(nn.Module):
502
+ def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1e9):
503
+ """
504
+ Args:
505
+ c_in:
506
+ Input channel dimension
507
+ c_hidden:
508
+ Overall hidden channel dimension (not per-head)
509
+ no_heads:
510
+ Number of attention heads
511
+ """
512
+ super().__init__()
513
+
514
+ self.c_in = c_in
515
+ self.c_hidden = c_hidden
516
+ self.no_heads = no_heads
517
+ self.starting = starting
518
+ self.inf = inf
519
+
520
+ self.layer_norm = LayerNorm(self.c_in)
521
+
522
+ self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init="normal")
523
+
524
+ self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads)
525
+
526
+ @torch.jit.ignore
527
+ def _chunk(
528
+ self,
529
+ x: torch.Tensor,
530
+ biases: List[torch.Tensor],
531
+ chunk_size: int,
532
+ use_memory_efficient_kernel: bool = False,
533
+ use_lma: bool = False,
534
+ inplace_safe: bool = False,
535
+ ) -> torch.Tensor:
536
+ "triangle! triangle!"
537
+ mha_inputs = {
538
+ "q_x": x,
539
+ "kv_x": x,
540
+ "biases": biases,
541
+ }
542
+
543
+ return chunk_layer(
544
+ partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma),
545
+ mha_inputs,
546
+ chunk_size=chunk_size,
547
+ no_batch_dims=len(x.shape[:-2]),
548
+ _out=x if inplace_safe else None,
549
+ )
550
+
551
+ def forward(
552
+ self,
553
+ x: torch.Tensor,
554
+ mask: Optional[torch.Tensor] = None,
555
+ chunk_size: Optional[int] = None,
556
+ use_memory_efficient_kernel: bool = False,
557
+ use_lma: bool = False,
558
+ inplace_safe: bool = False,
559
+ ) -> torch.Tensor:
560
+ """
561
+ Args:
562
+ x:
563
+ [*, I, J, C_in] input tensor (e.g. the pair representation)
564
+ Returns:
565
+ [*, I, J, C_in] output tensor
566
+ """
567
+ if mask is None:
568
+ # [*, I, J]
569
+ mask = x.new_ones(
570
+ x.shape[:-1],
571
+ )
572
+
573
+ if not self.starting:
574
+ x = x.transpose(-2, -3)
575
+ mask = mask.transpose(-1, -2)
576
+
577
+ # [*, I, J, C_in]
578
+ x = self.layer_norm(x)
579
+
580
+ # [*, I, 1, 1, J]
581
+ mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]
582
+
583
+ # [*, H, I, J]
584
+ triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))
585
+
586
+ # [*, 1, H, I, J]
587
+ triangle_bias = triangle_bias.unsqueeze(-4)
588
+
589
+ biases = [mask_bias, triangle_bias]
590
+
591
+ if chunk_size is not None:
592
+ x = self._chunk(
593
+ x,
594
+ biases,
595
+ chunk_size,
596
+ use_memory_efficient_kernel=use_memory_efficient_kernel,
597
+ use_lma=use_lma,
598
+ inplace_safe=inplace_safe,
599
+ )
600
+ else:
601
+ x = self.mha(
602
+ q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma
603
+ )
604
+
605
+ if not self.starting:
606
+ x = x.transpose(-2, -3)
607
+
608
+ return x
609
+
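+ # Usage sketch for EsmFoldTriangleAttention (hypothetical sizes, not from the original file):
+ #   tri = EsmFoldTriangleAttention(c_in=32, c_hidden=8, no_heads=4, starting=True)
+ #   z = torch.randn(1, 6, 6, 32)   # pair representation
+ #   z = tri(z)                     # -> (1, 6, 6, 32); with starting=False the same attention runs over columns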
610
+
611
+ class EsmFoldTriangleMultiplicativeUpdate(nn.Module):
612
+ """
613
+ Implements Algorithms 11 and 12.
614
+ """
615
+
616
+ def __init__(self, config, _outgoing=True):
617
+ super().__init__()
618
+ c_hidden = config.pairwise_state_dim
619
+ self._outgoing = _outgoing
620
+
621
+ self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden)
622
+ self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
623
+ self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden)
624
+ self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
625
+ self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
626
+ self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init="final")
627
+
628
+ self.layer_norm_in = LayerNorm(c_hidden)
629
+ self.layer_norm_out = LayerNorm(c_hidden)
630
+
631
+ self.sigmoid = nn.Sigmoid()
632
+
633
+ def _combine_projections(
634
+ self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int] = None
635
+ ) -> torch.Tensor:
636
+ if self._outgoing:
637
+ a = permute_final_dims(a, (2, 0, 1))
638
+ b = permute_final_dims(b, (2, 1, 0))
639
+ else:
640
+ a = permute_final_dims(a, (2, 1, 0))
641
+ b = permute_final_dims(b, (2, 0, 1))
642
+
643
+ if _inplace_chunk_size is not None:
644
+ # To be replaced by torch vmap
645
+ for i in range(0, a.shape[-3], _inplace_chunk_size):
646
+ a_chunk = a[..., i : i + _inplace_chunk_size, :, :]
647
+ b_chunk = b[..., i : i + _inplace_chunk_size, :, :]
648
+ a[..., i : i + _inplace_chunk_size, :, :] = torch.matmul(
649
+ a_chunk,
650
+ b_chunk,
651
+ )
652
+
653
+ p = a
654
+ else:
655
+ p = torch.matmul(a, b)
656
+
657
+ return permute_final_dims(p, (1, 2, 0))
658
+
659
+ def _inference_forward(
660
+ self,
661
+ z: torch.Tensor,
662
+ mask: Optional[torch.Tensor] = None,
663
+ inplace_chunk_size: Optional[int] = None,
664
+ with_add: bool = True,
665
+ ):
666
+ """
667
+ Args:
668
+ z:
669
+ A [*, N, N, C_z] pair representation
670
+ mask:
671
+ A [*, N, N] pair mask
672
+ inplace_chunk_size:
673
+ Size of chunks used in the main computation. Increase to trade memory for speed.
674
+ with_add:
675
+ If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).
676
+ Returns:
677
+ A reference to the overwritten z
678
+
679
+ More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the
680
+ addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten
681
+ values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.
682
+ Useful for inference on extremely long sequences.
683
+
684
+ It works as follows. We will make reference to variables used in the default forward implementation below.
685
+ Naively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the
686
+ "square" input tensor, 2) a, the first projection of z, 3) b, the second projection of b, 4) g, a z-sized mask,
687
+ and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for
688
+ N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate
689
+ tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the
690
+ tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over
691
+ pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains
692
+ inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring
693
+ total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks
694
+ directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at
695
+ the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column
696
+ ahead of previously overwritten columns and can be recovered directly from z. After the first iteration,
697
+ however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,
698
+ a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For
699
+ 0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith
700
+ iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead.
701
+ Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the
702
+ z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.
703
+ After the final iteration, z has been completely overwritten and contains the triangular multiplicative update.
704
+ If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case,
705
+ peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small
706
+ variables.
707
+ """
708
+ if mask is None:
709
+ mask = z.new_ones(z.shape[:-1])
710
+
711
+ mask = mask.unsqueeze(-1)
712
+
713
+ def compute_projection_helper(pair, mask, a=True):
714
+ if a:
715
+ linear_g = self.linear_a_g
716
+ linear_p = self.linear_a_p
717
+ else:
718
+ linear_g = self.linear_b_g
719
+ linear_p = self.linear_b_p
720
+
721
+ pair = self.layer_norm_in(pair)
722
+ p = linear_g(pair)
723
+ p.sigmoid_()
724
+ p *= linear_p(pair)
725
+ p *= mask
726
+ p = permute_final_dims(p, (2, 0, 1))
727
+ return p
728
+
729
+ def compute_projection(pair, mask, a=True, chunked=True):
730
+ need_transpose = self._outgoing ^ a
731
+ if not chunked:
732
+ p = compute_projection_helper(pair, mask, a)
733
+ if need_transpose:
734
+ p = p.transpose(-1, -2)
735
+ else:
736
+ # This computation is chunked so as not to exceed our 2.5x
737
+ # budget with a large intermediate tensor
738
+ linear_g = self.linear_a_g if a else self.linear_b_g
739
+ c = linear_g.bias.shape[-1]
740
+ out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1]
741
+ p = pair.new_zeros(out_shape)
742
+ for i in range(0, pair.shape[-3], inplace_chunk_size):
743
+ pair_chunk = pair[..., i : i + inplace_chunk_size, :, :]
744
+ pair_chunk = compute_projection_helper(
745
+ pair[..., i : i + inplace_chunk_size, :, :],
746
+ mask[..., i : i + inplace_chunk_size, :, :],
747
+ a,
748
+ )
749
+ if need_transpose:
750
+ pair_chunk = pair_chunk.transpose(-1, -2)
751
+ p[..., i : i + inplace_chunk_size] = pair_chunk
752
+ else:
753
+ p[..., i : i + inplace_chunk_size, :] = pair_chunk
754
+
755
+ del pair_chunk
756
+
757
+ return p
758
+
759
+ # We start by fully manifesting a. In addition to the input, this
760
+ # brings total memory consumption to 2x z (disregarding size of chunks)
761
+ # [*, N, N, c]
762
+ a = compute_projection(z, mask, True, chunked=True)
763
+
764
+ if inplace_chunk_size is not None:
765
+ n = a.shape[-1]
766
+ half_n = n // 2 + n % 2
767
+ row_dim = -3
768
+ col_dim = -2
769
+ b_chunk_dim = row_dim if self._outgoing else col_dim
770
+
771
+ def empty_slicer(t):
772
+ return [slice(None) for _ in t.shape]
773
+
774
+ def slice_tensor(t, start, end, dim):
775
+ # Slices start:end from the dim dimension of t
776
+ s = empty_slicer(t)
777
+ s[dim] = slice(start, end)
778
+ return t[s]
779
+
780
+ def flip_z_cache_(z_cache, z):
781
+ # "Reorient" the z_cache (see below), filling it with quadrants
782
+ # 3---recovered from the z_cache---and 4---recovered from z---
783
+ # of the input tensor z.
784
+ quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim)
785
+ z_cache = z_cache.transpose(row_dim, col_dim)
786
+
787
+ # If n is odd, we need to shrink the z_cache by one row
788
+ z_cache = z_cache[..., : (n // 2), :, :]
789
+
790
+ # Move the 3rd quadrant of z into the first half of the reoriented z-cache
791
+ first_half_slicer = empty_slicer(z_cache)
792
+ first_half_slicer[col_dim] = slice(0, half_n)
793
+ z_cache[first_half_slicer] = quadrant_3
794
+
795
+ # Get the fourth quadrant of z
796
+ quadrant_4 = slice_tensor(z, half_n, None, row_dim)
797
+ quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim)
798
+
799
+ # Insert said quadrant into the rotated z-cache
800
+ quadrant_3_slicer = empty_slicer(z_cache)
801
+ quadrant_3_slicer[col_dim] = slice(half_n, None)
802
+
803
+ z_cache[quadrant_3_slicer] = quadrant_4
804
+
805
+ return z_cache
806
+
807
+ # Initialize the z cache to the left half of z.
808
+ z_cache_shape = list(z.shape)
809
+ z_cache_shape[col_dim] = half_n
810
+ z_cache = z.new_zeros(z_cache_shape)
811
+ z_cache_slicer = empty_slicer(z_cache)
812
+ z_cache_slicer[col_dim] = slice(0, half_n)
813
+ z_cache.copy_(z[z_cache_slicer])
814
+ z_cache_rotated = False
815
+
816
+ # We need to reorient the z-cache at the halfway point, and we
817
+ # don't want a single chunk to straddle that point. We contract one
818
+ # of the chunks in the middle to address that problem.
819
+ i_range = list(range(0, half_n, inplace_chunk_size))
820
+ initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])]
821
+ after_half = list(range(half_n, n, inplace_chunk_size))
822
+ after_half_offsets = [inplace_chunk_size for _ in after_half]
823
+ combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets)
824
+ for i, offset in combined_range_with_offsets:
825
+ if not z_cache_rotated and i >= half_n:
826
+ z_cache = flip_z_cache_(z_cache, z)
827
+ z_cache_rotated = True
828
+
829
+ z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim)
830
+ mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim)
831
+
832
+ z_chunk_b = z_chunk_b.clone()
833
+ if b_chunk_dim == col_dim:
834
+ z_chunk_b = slice_tensor(z, i, i + offset, col_dim)
835
+ else: # b_chunk_dim == row_dim
836
+ # In this case, the b-dimension (b_chunk_dim) is partially
837
+ # overwritten at the end of each iteration. We need to
838
+ # restore the missing component from the z-cache.
839
+ if not z_cache_rotated:
840
+ z_chunk_slicer = empty_slicer(z_chunk_b)
841
+ z_chunk_slicer[col_dim] = slice(0, half_n)
842
+ z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim)
843
+ else:
844
+ z_cache_offset = i - half_n
845
+ z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim)
846
+
847
+ b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False)
848
+ del z_chunk_b
849
+
850
+ x_chunk = torch.matmul(a, b_chunk)
851
+ x_chunk = permute_final_dims(x_chunk, (1, 2, 0))
852
+ x_chunk = self.layer_norm_out(x_chunk)
853
+ x_chunk = self.linear_z(x_chunk)
854
+
855
+ # The g dimension (col_dim) is parallel to and ahead of the
856
+ # overwrites in z. We can extract the g chunk normally.
857
+ z_chunk_g = slice_tensor(z, i, i + offset, col_dim)
858
+ g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g))
859
+ g_chunk.sigmoid_()
860
+ del z_chunk_g
861
+
862
+ x_chunk *= g_chunk
863
+
864
+ # Write the columns into z in-place
865
+ z_slicer = empty_slicer(z)
866
+ z_slicer[col_dim] = slice(i, i + offset)
867
+ if with_add:
868
+ z[z_slicer] += x_chunk
869
+ else:
870
+ z[z_slicer] = x_chunk
871
+ else:
872
+ b = compute_projection(z, mask, False, False)
873
+ x = torch.matmul(a, b)
874
+ x = self.layer_norm_out(x)
875
+ x = self.linear_z(x)
876
+ g = self.linear_g(z)
877
+ g.sigmoid_()
878
+ x *= g
879
+ if with_add:
880
+ z += x
881
+ else:
882
+ z = x
883
+
884
+ return z
885
+
886
+ def forward(
887
+ self,
888
+ z: torch.Tensor,
889
+ mask: Optional[torch.Tensor] = None,
890
+ inplace_safe: bool = False,
891
+ _add_with_inplace: bool = False,
892
+ _inplace_chunk_size: Optional[int] = 256,
893
+ ) -> torch.Tensor:
894
+ """
895
+ Args:
896
+ x:
897
+ [*, N_res, N_res, C_z] input tensor
898
+ mask:
899
+ [*, N_res, N_res] input mask
900
+ Returns:
901
+ [*, N_res, N_res, C_z] output tensor
902
+ """
903
+ if inplace_safe:
904
+ x = self._inference_forward(
905
+ z,
906
+ mask,
907
+ inplace_chunk_size=_inplace_chunk_size,
908
+ with_add=_add_with_inplace,
909
+ )
910
+ return x
911
+
912
+ if mask is None:
913
+ mask = z.new_ones(z.shape[:-1])
914
+
915
+ mask = mask.unsqueeze(-1)
916
+
917
+ z = self.layer_norm_in(z)
918
+ a = mask
919
+ a = a * self.sigmoid(self.linear_a_g(z))
920
+ a = a * self.linear_a_p(z)
921
+ b = mask
922
+ b = b * self.sigmoid(self.linear_b_g(z))
923
+ b = b * self.linear_b_p(z)
924
+
925
+ if is_fp16_enabled():
926
+ with torch.cuda.amp.autocast(enabled=False):
927
+ x = self._combine_projections(a.float(), b.float())
928
+ else:
929
+ x = self._combine_projections(a, b)
930
+
931
+ del a, b
932
+ x = self.layer_norm_out(x)
933
+ x = self.linear_z(x)
934
+ g = self.sigmoid(self.linear_g(z))
935
+ x = x * g
936
+
937
+ return x
938
+
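+ # Conceptual sketch (illustrative) of the non-chunked update computed above: for the "outgoing"
+ # variant, each channel c of the combined projection is p[i, j, c] = sum_k a[i, k, c] * b[j, k, c];
+ # p is then layer-normed, passed through linear_z and gated by sigmoid(linear_g(z)).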
939
+
940
+ class EsmFoldPreTrainedModel(EsmPreTrainedModel):
941
+ """
942
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
943
+ models.
944
+ """
945
+
946
+ # Subclass `EsmPreTrainedModel` to deal with special init
947
+ def _init_weights(self, module):
948
+ """Initialize the weights"""
949
+ if isinstance(module, EsmFoldLinear):
950
+ with torch.no_grad():
951
+ if module.init_fn is not None:
952
+ module.init_fn(module.weight, module.bias)
953
+ elif module.init == "default":
954
+ trunc_normal_init_(module.weight, scale=1.0)
955
+ elif module.init == "relu":
956
+ trunc_normal_init_(module.weight, scale=2.0)
957
+ elif module.init == "glorot":
958
+ nn.init.xavier_uniform_(module.weight, gain=1)
959
+ elif module.init == "gating":
960
+ module.weight.fill_(0.0)
961
+ if module.bias is not None:
962
+ module.bias.fill_(1.0)
963
+ elif module.init == "normal":
964
+ torch.nn.init.kaiming_normal_(module.weight, nonlinearity="linear")
965
+ elif module.init == "final":
966
+ module.weight.fill_(0.0)
967
+ elif isinstance(module, EsmFoldInvariantPointAttention):
968
+ ipa_point_weights_init_(module.head_weights)
969
+ elif isinstance(module, EsmFoldTriangularSelfAttentionBlock):
970
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight)
971
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias)
972
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight)
973
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias)
974
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight)
975
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias)
976
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight)
977
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias)
978
+
979
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight)
980
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias)
981
+ torch.nn.init.zeros_(module.pair_to_sequence.linear.weight)
982
+ torch.nn.init.zeros_(module.seq_attention.o_proj.weight)
983
+ torch.nn.init.zeros_(module.seq_attention.o_proj.bias)
984
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight)
985
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias)
986
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight)
987
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias)
988
+ else:
989
+ super()._init_weights(module)
990
+
991
+
992
+ class EsmFoldSelfAttention(nn.Module):
993
+ def __init__(self, embed_dim, num_heads, head_width, gated=False):
994
+ super().__init__()
995
+ assert embed_dim == num_heads * head_width
996
+
997
+ self.embed_dim = embed_dim
998
+ self.num_heads = num_heads
999
+ self.head_width = head_width
1000
+
1001
+ self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False)
1002
+ self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True)
1003
+ self.gated = gated
1004
+ if gated:
1005
+ self.g_proj = nn.Linear(embed_dim, embed_dim)
1006
+ torch.nn.init.zeros_(self.g_proj.weight)
1007
+ torch.nn.init.ones_(self.g_proj.bias)
1008
+
1009
+ self.rescale_factor = self.head_width**-0.5
1010
+
1011
+ torch.nn.init.zeros_(self.o_proj.bias)
1012
+
1013
+ def forward(self, x, mask=None, bias=None, indices=None):
1014
+ """
1015
+ Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths,
1016
+ use mask.
1017
+
1018
+ Inputs:
1019
+ x: batch of input sequences (.. x L x C) mask: batch of boolean masks where 1=valid, 0=padding position (..
1020
+ x L_k) bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads)
1021
+
1022
+ Outputs:
1023
+ sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
1024
+ """
1025
+
1026
+ t = self.proj(x).view(*x.shape[:2], self.num_heads, -1)
1027
+ t = t.permute(0, 2, 1, 3)
1028
+ q, k, v = t.chunk(3, dim=-1)
1029
+
1030
+ q = self.rescale_factor * q
1031
+ a = torch.einsum("...qc,...kc->...qk", q, k)
1032
+
1033
+ # Add external attention bias.
1034
+ if bias is not None:
1035
+ a = a + bias.permute(0, 3, 1, 2)
1036
+
1037
+ # Do not attend to padding tokens.
1038
+ if mask is not None:
1039
+ mask = mask[:, None, None]
1040
+ a = a.masked_fill(mask == False, -np.inf) # noqa: E712
1041
+
1042
+ a = nn.functional.softmax(a, dim=-1)
1043
+
1044
+ y = torch.einsum("...hqk,...hkc->...qhc", a, v)
1045
+ y = y.reshape(*y.shape[:2], -1)
1046
+
1047
+ if self.gated:
1048
+ y = self.g_proj(x).sigmoid() * y
1049
+ y = self.o_proj(y)
1050
+
1051
+ return y, a.permute(0, 3, 1, 2)
1052
+
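+ # Usage sketch for EsmFoldSelfAttention (hypothetical sizes, not from the original file):
+ #   attn = EsmFoldSelfAttention(embed_dim=64, num_heads=4, head_width=16, gated=True)
+ #   x = torch.randn(2, 7, 64)
+ #   mask = torch.ones(2, 7, dtype=torch.bool)
+ #   y, attn_maps = attn(x, mask=mask)   # y has shape (2, 7, 64)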
1053
+
1054
+ class EsmFoldDropout(nn.Module):
1055
+ """
1056
+ Implementation of dropout with the ability to share the dropout mask along a particular dimension.
1057
+ """
1058
+
1059
+ def __init__(self, r: float, batch_dim: Union[int, List[int]]):
1060
+ super().__init__()
1061
+
1062
+ self.r = r
1063
+ if isinstance(batch_dim, int):
1064
+ batch_dim = [batch_dim]
1065
+ self.batch_dim = batch_dim
1066
+ self.dropout = nn.Dropout(self.r)
1067
+
1068
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1069
+ shape = list(x.shape)
1070
+ if self.batch_dim is not None:
1071
+ for bd in self.batch_dim:
1072
+ shape[bd] = 1
1073
+ return x * self.dropout(x.new_ones(shape))
1074
+
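+ # Sketch (illustrative): with batch_dim=1 the same dropout mask is shared along dimension 1,
+ # so entire slices are kept or zeroed together:
+ #   drop = EsmFoldDropout(r=0.5, batch_dim=1)
+ #   drop.train()
+ #   out = drop(torch.ones(2, 4, 3))   # each (batch, :, channel) slice is scaled by 0 or by 1 / (1 - 0.5)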
1075
+
1076
+ class EsmFoldSequenceToPair(nn.Module):
1077
+ def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
1078
+ super().__init__()
1079
+
1080
+ self.layernorm = nn.LayerNorm(sequence_state_dim)
1081
+ self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
1082
+ self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)
1083
+
1084
+ torch.nn.init.zeros_(self.proj.bias)
1085
+ torch.nn.init.zeros_(self.o_proj.bias)
1086
+
1087
+ def forward(self, sequence_state):
1088
+ """
1089
+ Inputs:
1090
+ sequence_state: B x L x sequence_state_dim
1091
+
1092
+ Output:
1093
+ pairwise_state: B x L x L x pairwise_state_dim
1094
+
1095
+ Intermediate state:
1096
+ B x L x L x 2*inner_dim
1097
+ """
1098
+
1099
+ assert len(sequence_state.shape) == 3
1100
+
1101
+ s = self.layernorm(sequence_state)
1102
+ s = self.proj(s)
1103
+ q, k = s.chunk(2, dim=-1)
1104
+
1105
+ prod = q[:, None, :, :] * k[:, :, None, :]
1106
+ diff = q[:, None, :, :] - k[:, :, None, :]
1107
+
1108
+ x = torch.cat([prod, diff], dim=-1)
1109
+ x = self.o_proj(x)
1110
+
1111
+ return x
1112
+
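+ # Shape sketch (illustrative sizes, not from the original file):
+ #   s2p = EsmFoldSequenceToPair(sequence_state_dim=32, inner_dim=16, pairwise_state_dim=64)
+ #   pair = s2p(torch.randn(1, 5, 32))   # (1, 5, 5, 64), built from products and differences of per-residue q/k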
1113
+
1114
+ class EsmFoldPairToSequence(nn.Module):
1115
+ def __init__(self, pairwise_state_dim, num_heads):
1116
+ super().__init__()
1117
+
1118
+ self.layernorm = nn.LayerNorm(pairwise_state_dim)
1119
+ self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False)
1120
+
1121
+ def forward(self, pairwise_state):
1122
+ """
1123
+ Inputs:
1124
+ pairwise_state: B x L x L x pairwise_state_dim
1125
+
1126
+ Output:
1127
+ pairwise_bias: B x L x L x num_heads
1128
+ """
1129
+ assert len(pairwise_state.shape) == 4
1130
+ z = self.layernorm(pairwise_state)
1131
+ pairwise_bias = self.linear(z)
1132
+ return pairwise_bias
1133
+
1134
+
1135
+ class EsmFoldResidueMLP(nn.Module):
1136
+ def __init__(self, embed_dim, inner_dim, dropout=0):
1137
+ super().__init__()
1138
+
1139
+ self.mlp = nn.Sequential(
1140
+ nn.LayerNorm(embed_dim),
1141
+ nn.Linear(embed_dim, inner_dim),
1142
+ nn.ReLU(),
1143
+ nn.Linear(inner_dim, embed_dim),
1144
+ nn.Dropout(dropout),
1145
+ )
1146
+
1147
+ def forward(self, x):
1148
+ return x + self.mlp(x)
1149
+
1150
+
1151
+ class EsmFoldTriangularSelfAttentionBlock(nn.Module):
1152
+ def __init__(self, config):
1153
+ super().__init__()
1154
+ self.config = config
1155
+
1156
+ sequence_state_dim = config.sequence_state_dim
1157
+ pairwise_state_dim = config.pairwise_state_dim
1158
+ sequence_num_heads = sequence_state_dim // config.sequence_head_width
1159
+ pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width
1160
+
1161
+ self.layernorm_1 = nn.LayerNorm(sequence_state_dim)
1162
+
1163
+ self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim)
1164
+ self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads)
1165
+
1166
+ self.seq_attention = EsmFoldSelfAttention(
1167
+ sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True
1168
+ )
1169
+ self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True)
1170
+ self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False)
1171
+
1172
+ self.tri_att_start = EsmFoldTriangleAttention(
1173
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=True
1174
+ )
1175
+ self.tri_att_end = EsmFoldTriangleAttention(
1176
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=False
1177
+ )
1178
+
1179
+ self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout)
1180
+ self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout)
1181
+
1182
+ self.drop = nn.Dropout(config.dropout)
1183
+ self.row_drop = EsmFoldDropout(config.dropout * 2, 2)
1184
+ self.col_drop = EsmFoldDropout(config.dropout * 2, 1)
1185
+
1186
+ def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
1187
+ """
1188
+ Inputs:
1189
+ sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim mask: B x L boolean
1190
+ tensor of valid positions
1191
+
1192
+ Output:
1193
+ sequence_state: B x L x sequence_state_dim pairwise_state: B x L x L x pairwise_state_dim
1194
+ """
1195
+ if len(sequence_state.shape) != 3:
1196
+ raise ValueError(f"`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.")
1197
+ if len(pairwise_state.shape) != 4:
1198
+ raise ValueError(f"`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.")
1199
+ if mask is not None and len(mask.shape) != 2:
1200
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
1201
+
1202
+ batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
1203
+ pairwise_state_dim = pairwise_state.shape[3]
1204
+
1205
+ if sequence_state_dim != self.config.sequence_state_dim:
1206
+ raise ValueError(
1207
+ "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got "
1208
+ f"{sequence_state_dim} != {self.config.sequence_state_dim}."
1209
+ )
1210
+ if pairwise_state_dim != self.config.pairwise_state_dim:
1211
+ raise ValueError(
1212
+ "`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got "
1213
+ f"{pairwise_state_dim} != {self.config.pairwise_state_dim}."
1214
+ )
1215
+ if batch_dim != pairwise_state.shape[0]:
1216
+ raise ValueError(
1217
+ f"`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != "
1218
+ f"{pairwise_state.shape[0]}."
1219
+ )
1220
+ if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]:
1221
+ raise ValueError(
1222
+ f"`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != "
1223
+ f"{pairwise_state.shape[1]} or {pairwise_state.shape[2]}."
1224
+ )
1225
+
1226
+ # Update sequence state
1227
+ bias = self.pair_to_sequence(pairwise_state)
1228
+
1229
+ # Self attention with bias + mlp.
1230
+ y = self.layernorm_1(sequence_state)
1231
+ y, _ = self.seq_attention(y, mask=mask, bias=bias)
1232
+ sequence_state = sequence_state + self.drop(y)
1233
+ sequence_state = self.mlp_seq(sequence_state)
1234
+
1235
+ # Update pairwise state
1236
+ pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)
1237
+
1238
+ # Axial attention with triangular bias.
1239
+ tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
1240
+ pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask))
1241
+ pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask))
1242
+ pairwise_state = pairwise_state + self.row_drop(
1243
+ self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
1244
+ )
1245
+ pairwise_state = pairwise_state + self.col_drop(
1246
+ self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
1247
+ )
1248
+
1249
+ # MLP over pairs.
1250
+ pairwise_state = self.mlp_pair(pairwise_state)
1251
+
1252
+ return sequence_state, pairwise_state
1253
+
1254
+
1255
+ class EsmCategoricalMixture:
1256
+ def __init__(self, param, bins=50, start=0, end=1):
1257
+ # All tensors are of shape ..., bins.
1258
+ self.logits = param
1259
+ bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype)
1260
+ self.v_bins = (bins[:-1] + bins[1:]) / 2
1261
+
1262
+ def log_prob(self, true):
1263
+ # Shapes are:
1264
+ # self.probs: ... x bins
1265
+ # true : ...
1266
+ true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1)
1267
+ nll = self.logits.log_softmax(-1)
1268
+ return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1)
1269
+
1270
+ def mean(self):
1271
+ return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
1272
+
1273
+
1274
+ def categorical_lddt(logits, bins=50):
1275
+ # Logits are ..., 37, bins.
1276
+ return EsmCategoricalMixture(logits, bins=bins).mean()
1277
+
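+ # Sketch (illustrative): categorical_lddt converts per-bin logits into an expected value over the
+ # bin centres in (0, 1):
+ #   logits = torch.randn(2, 10, 37, 50)
+ #   plddt = categorical_lddt(logits, bins=50)   # (2, 10, 37), each entry strictly between 0 and 1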
1278
+
1279
+ def get_axial_mask(mask):
1280
+ """
1281
+ Helper to convert B x L mask of valid positions to axial mask used in row column attentions.
1282
+
1283
+ Input:
1284
+ mask: B x L tensor of booleans
1285
+
1286
+ Output:
1287
+ mask: (B * L) x L tensor of booleans
1288
+ """
1289
+
1290
+ if mask is None:
1291
+ return None
1292
+
1293
+ if len(mask.shape) != 2:
1294
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
1295
+ batch_dim, seq_dim = mask.shape
1296
+ m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim)
1297
+ m = m.reshape(batch_dim * seq_dim, seq_dim)
1298
+ return m
1299
+
1300
+
1301
+ class EsmFoldRelativePosition(nn.Module):
1302
+ def __init__(self, config):
1303
+ super().__init__()
1304
+ self.bins = config.position_bins
1305
+
1306
+ # Note an additional offset is used so that the 0th position
1307
+ # is reserved for masked pairs.
1308
+ self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim)
1309
+
1310
+ def forward(self, residue_index, mask=None):
1311
+ """
1312
+ Input:
1313
+ residue_index: B x L tensor of indices (dtype=torch.long) mask: B x L tensor of booleans
1314
+
1315
+ Output:
1316
+ pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
1317
+ """
1318
+ if residue_index.dtype != torch.long:
1319
+ raise ValueError(f"`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.")
1320
+ if mask is not None and residue_index.shape != mask.shape:
1321
+ raise ValueError(
1322
+ f"`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}."
1323
+ )
1324
+
1325
+ diff = residue_index[:, None, :] - residue_index[:, :, None]
1326
+ diff = diff.clamp(-self.bins, self.bins)
1327
+ diff = diff + self.bins + 1 # Add 1 to adjust for padding index.
1328
+
1329
+ if mask is not None:
1330
+ mask = mask[:, None, :] * mask[:, :, None]
1331
+ diff[mask == False] = 0 # noqa: E712
1332
+
1333
+ output = self.embedding(diff)
1334
+ return output
1335
+
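+ # Index sketch (illustrative, e.g. with config.position_bins = 32): a residue pair with offset 37
+ # is clamped to 32 and mapped to embedding index 32 + 32 + 1 = 65, offsets of -32 or less map to
+ # index 1, and masked pairs are sent to the reserved index 0.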
1336
+
1337
+ class EsmFoldAngleResnetBlock(nn.Module):
1338
+ def __init__(self, config):
1339
+ super().__init__()
1340
+
1341
+ self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="relu")
1342
+ self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="final")
1343
+
1344
+ self.relu = nn.ReLU()
1345
+
1346
+ def forward(self, a: torch.Tensor) -> torch.Tensor:
1347
+ s_initial = a
1348
+
1349
+ a = self.relu(a)
1350
+ a = self.linear_1(a)
1351
+ a = self.relu(a)
1352
+ a = self.linear_2(a)
1353
+
1354
+ return a + s_initial
1355
+
1356
+
1357
+ class EsmFoldAngleResnet(nn.Module):
1358
+ """
1359
+ Implements Algorithm 20, lines 11-14
1360
+ """
1361
+
1362
+ def __init__(self, config):
1363
+ super().__init__()
1364
+ self.config = config
1365
+
1366
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
1367
+ self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
1368
+
1369
+ self.layers = nn.ModuleList()
1370
+ for _ in range(config.num_resnet_blocks):
1371
+ layer = EsmFoldAngleResnetBlock(config)
1372
+ self.layers.append(layer)
1373
+
1374
+ self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2)
1375
+
1376
+ self.relu = nn.ReLU()
1377
+
1378
+ def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
1379
+ """
1380
+ Args:
1381
+ s:
1382
+ [*, C_hidden] single embedding
1383
+ s_initial:
1384
+ [*, C_hidden] single embedding as of the start of the StructureModule
1385
+ Returns:
1386
+ [*, no_angles, 2] unnormalized angles and [*, no_angles, 2] normalized predicted angles
1387
+ """
1388
+ # NOTE: The ReLUs applied to the inputs are absent from the supplement
1389
+ # pseudocode but present in the source. For maximal compatibility with
1390
+ # the pretrained weights, I'm going with the source.
1391
+
1392
+ # [*, C_hidden]
1393
+ s_initial = self.relu(s_initial)
1394
+ s_initial = self.linear_initial(s_initial)
1395
+ s = self.relu(s)
1396
+ s = self.linear_in(s)
1397
+ s = s + s_initial
1398
+
1399
+ for l in self.layers:
1400
+ s = l(s)
1401
+
1402
+ s = self.relu(s)
1403
+
1404
+ # [*, no_angles * 2]
1405
+ s = self.linear_out(s)
1406
+
1407
+ # [*, no_angles, 2]
1408
+ s = s.view(s.shape[:-1] + (-1, 2))
1409
+
1410
+ unnormalized_s = s
1411
+ norm_denom = torch.sqrt(
1412
+ torch.clamp(
1413
+ torch.sum(s**2, dim=-1, keepdim=True),
1414
+ min=self.config.epsilon,
1415
+ )
1416
+ )
1417
+ s = s / norm_denom
1418
+
1419
+ return unnormalized_s, s
1420
+
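+ # Note (illustrative): each of the `num_angles` outputs above is a 2-vector normalised to unit
+ # length, i.e. a point on the unit circle encoding one torsion angle; `unnormalized_s` keeps the
+ # raw values so that training can additionally penalise deviations of their norm from 1.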
1421
+
1422
+ class EsmFoldInvariantPointAttention(nn.Module):
1423
+ """
1424
+ Implements Algorithm 22.
1425
+ """
1426
+
1427
+ def __init__(self, config):
1428
+ super().__init__()
1429
+ self.config = config
1430
+
1431
+ c_s = config.sequence_dim
1432
+ c_z = config.pairwise_dim
1433
+ self.hidden_dim = config.ipa_dim
1434
+ self.num_heads = config.num_heads_ipa
1435
+ self.num_qk_points = config.num_qk_points
1436
+ self.num_v_points = config.num_v_points
1437
+
1438
+ # These linear layers differ from their specifications in the
1439
+ # supplement. There, they lack bias and use Glorot initialization.
1440
+ # Here as in the official source, they have bias and use the default
1441
+ # Lecun initialization.
1442
+ hc = config.ipa_dim * config.num_heads_ipa
1443
+ self.linear_q = EsmFoldLinear(c_s, hc)
1444
+ self.linear_kv = EsmFoldLinear(c_s, 2 * hc)
1445
+
1446
+ hpq = config.num_heads_ipa * config.num_qk_points * 3
1447
+ self.linear_q_points = EsmFoldLinear(c_s, hpq)
1448
+
1449
+ hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3
1450
+ self.linear_kv_points = EsmFoldLinear(c_s, hpkv)
1451
+
1452
+ self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa)
1453
+
1454
+ self.head_weights = nn.Parameter(torch.zeros((config.num_heads_ipa)))
1455
+
1456
+ concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4)
1457
+ self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init="final")
1458
+
1459
+ self.softmax = nn.Softmax(dim=-1)
1460
+ self.softplus = nn.Softplus()
1461
+
1462
+ def forward(
1463
+ self,
1464
+ s: torch.Tensor,
1465
+ z: Optional[torch.Tensor],
1466
+ r: Rigid,
1467
+ mask: torch.Tensor,
1468
+ _offload_inference: bool = False,
1469
+ _z_reference_list: Optional[Sequence[torch.Tensor]] = None,
1470
+ ) -> torch.Tensor:
1471
+ """
1472
+ Args:
1473
+ s:
1474
+ [*, N_res, C_s] single representation
1475
+ z:
1476
+ [*, N_res, N_res, C_z] pair representation
1477
+ r:
1478
+ [*, N_res] transformation object
1479
+ mask:
1480
+ [*, N_res] mask
1481
+ Returns:
1482
+ [*, N_res, C_s] single representation update
1483
+ """
1484
+ z = [z]
1485
+
1486
+ #######################################
1487
+ # Generate scalar and point activations
1488
+ #######################################
1489
+ # [*, N_res, H * C_hidden]
1490
+ q = self.linear_q(s)
1491
+ kv = self.linear_kv(s)
1492
+
1493
+ # [*, N_res, H, C_hidden]
1494
+ q = q.view(q.shape[:-1] + (self.num_heads, -1))
1495
+
1496
+ # [*, N_res, H, 2 * C_hidden]
1497
+ kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))
1498
+
1499
+ # [*, N_res, H, C_hidden]
1500
+ k, v = torch.split(kv, self.hidden_dim, dim=-1)
1501
+
1502
+ # [*, N_res, H * P_q * 3]
1503
+ q_pts = self.linear_q_points(s)
1504
+
1505
+ # This is kind of clunky, but it's how the original does it
1506
+ # [*, N_res, H * P_q, 3]
1507
+ q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
1508
+ q_pts = torch.stack(q_pts, dim=-1)
1509
+ q_pts = r[..., None].apply(q_pts)
1510
+
1511
+ # [*, N_res, H, P_q, 3]
1512
+ q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))
1513
+
1514
+ # [*, N_res, H * (P_q + P_v) * 3]
1515
+ kv_pts = self.linear_kv_points(s)
1516
+
1517
+ # [*, N_res, H * (P_q + P_v), 3]
1518
+ kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
1519
+ kv_pts = torch.stack(kv_pts, dim=-1)
1520
+ kv_pts = r[..., None].apply(kv_pts)
1521
+
1522
+ # [*, N_res, H, (P_q + P_v), 3]
1523
+ kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))
1524
+
1525
+ # [*, N_res, H, P_q/P_v, 3]
1526
+ k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)
1527
+
1528
+ ##########################
1529
+ # Compute attention scores
1530
+ ##########################
1531
+ # [*, N_res, N_res, H]
1532
+ b = self.linear_b(z[0])
1533
+
1534
+ if _offload_inference:
1535
+ assert sys.getrefcount(z[0]) == 2
1536
+ z[0] = z[0].cpu()
1537
+
1538
+ # [*, H, N_res, N_res]
1539
+ if is_fp16_enabled():
1540
+ with torch.cuda.amp.autocast(enabled=False):
1541
+ a = torch.matmul(
1542
+ permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden]
1543
+ permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res]
1544
+ )
1545
+ else:
1546
+ a = torch.matmul(
1547
+ permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]
1548
+ permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res]
1549
+ )
1550
+
1551
+ a *= math.sqrt(1.0 / (3 * self.hidden_dim))
1552
+ a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))
1553
+
1554
+ # [*, N_res, N_res, H, P_q, 3]
1555
+ pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
1556
+ pt_att = pt_att**2
1557
+
1558
+ # [*, N_res, N_res, H, P_q]
1559
+ pt_att = sum(torch.unbind(pt_att, dim=-1))
1560
+ head_weights = self.softplus(self.head_weights).view(*((1,) * len(pt_att.shape[:-2]) + (-1, 1)))
1561
+ head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))
1562
+ pt_att = pt_att * head_weights
1563
+
1564
+ # [*, N_res, N_res, H]
1565
+ pt_att = torch.sum(pt_att, dim=-1) * (-0.5)
1566
+ # [*, N_res, N_res]
1567
+ square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
1568
+ square_mask = self.config.inf * (square_mask - 1)
1569
+
1570
+ # [*, H, N_res, N_res]
1571
+ pt_att = permute_final_dims(pt_att, (2, 0, 1))
1572
+
1573
+ a = a + pt_att
1574
+ a = a + square_mask.unsqueeze(-3)
1575
+ a = self.softmax(a)
1576
+
1577
+ ################
1578
+ # Compute output
1579
+ ################
1580
+ # [*, N_res, H, C_hidden]
1581
+ o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)
1582
+
1583
+ # [*, N_res, H * C_hidden]
1584
+ o = flatten_final_dims(o, 2)
1585
+
1586
+ # [*, H, 3, N_res, P_v]
1587
+ o_pt = torch.sum(
1588
+ (a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]),
1589
+ dim=-2,
1590
+ )
1591
+
1592
+ # [*, N_res, H, P_v, 3]
1593
+ o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
1594
+ o_pt = r[..., None, None].invert_apply(o_pt)
1595
+
1596
+ # [*, N_res, H * P_v]
1597
+ o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt**2, dim=-1) + self.config.epsilon), 2)
1598
+
1599
+ # [*, N_res, H * P_v, 3]
1600
+ o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)
1601
+
1602
+ if _offload_inference:
1603
+ z[0] = z[0].to(o_pt.device)
1604
+
1605
+ # [*, N_res, H, C_z]
1606
+ o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))
1607
+
1608
+ # [*, N_res, H * C_z]
1609
+ o_pair = flatten_final_dims(o_pair, 2)
1610
+
1611
+ # [*, N_res, C_s]
1612
+ s = self.linear_out(
1613
+ torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype)
1614
+ )
1615
+
1616
+ return s
1617
+
1618
+
1619
+ class EsmFoldBackboneUpdate(nn.Module):
1620
+ """
1621
+ Implements part of Algorithm 23.
1622
+ """
1623
+
1624
+ def __init__(self, config):
1625
+ super().__init__()
1626
+
1627
+ self.linear = EsmFoldLinear(config.sequence_dim, 6, init="final")
1628
+
1629
+ def forward(self, s: torch.Tensor) -> torch.Tensor:
1630
+ """
1631
+ Args:
1632
+ s: [*, N_res, C_s] single representation
1633
+ Returns:
1634
+ [*, N_res, 6] update vector
1635
+ """
1636
+ # [*, 6]
1637
+ update = self.linear(s)
1638
+
1639
+ return update
1640
+
1641
+
1642
+ class EsmFoldStructureModuleTransitionLayer(nn.Module):
1643
+ def __init__(self, config):
1644
+ super().__init__()
1645
+
1646
+ self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
1647
+ self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
1648
+ self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="final")
1649
+
1650
+ self.relu = nn.ReLU()
1651
+
1652
+ def forward(self, s):
1653
+ s_initial = s
1654
+ s = self.linear_1(s)
1655
+ s = self.relu(s)
1656
+ s = self.linear_2(s)
1657
+ s = self.relu(s)
1658
+ s = self.linear_3(s)
1659
+
1660
+ s = s + s_initial
1661
+
1662
+ return s
1663
+
1664
+
1665
+ class EsmFoldStructureModuleTransition(nn.Module):
1666
+ def __init__(self, config):
1667
+ super().__init__()
1668
+ self.config = config
1669
+
1670
+ self.layers = nn.ModuleList()
1671
+ for _ in range(config.num_transition_layers):
1672
+ l = EsmFoldStructureModuleTransitionLayer(config)
1673
+ self.layers.append(l)
1674
+
1675
+ self.dropout = nn.Dropout(config.dropout_rate)
1676
+ self.layer_norm = LayerNorm(config.sequence_dim)
1677
+
1678
+ def forward(self, s):
1679
+ for l in self.layers:
1680
+ s = l(s)
1681
+
1682
+ s = self.dropout(s)
1683
+ s = self.layer_norm(s)
1684
+
1685
+ return s
1686
+
1687
+
1688
+ class EsmFoldStructureModule(nn.Module):
1689
+ def __init__(self, config):
1690
+ super().__init__()
1691
+ self.config = config
1692
+
1693
+ # Buffers to be lazily initialized later
1694
+ # self.default_frames
1695
+ # self.group_idx
1696
+ # self.atom_mask
1697
+ # self.lit_positions
1698
+
1699
+ self.layer_norm_s = LayerNorm(config.sequence_dim)
1700
+ self.layer_norm_z = LayerNorm(config.pairwise_dim)
1701
+
1702
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim)
1703
+
1704
+ self.ipa = EsmFoldInvariantPointAttention(config)
1705
+
1706
+ self.ipa_dropout = nn.Dropout(config.dropout_rate)
1707
+ self.layer_norm_ipa = LayerNorm(config.sequence_dim)
1708
+
1709
+ self.transition = EsmFoldStructureModuleTransition(config)
1710
+ self.bb_update = EsmFoldBackboneUpdate(config)
1711
+ self.angle_resnet = EsmFoldAngleResnet(config)
1712
+
1713
+ def forward(
1714
+ self,
1715
+ evoformer_output_dict,
1716
+ aatype,
1717
+ mask=None,
1718
+ _offload_inference=False,
1719
+ ):
1720
+ """
1721
+ Args:
1722
+ evoformer_output_dict:
1723
+ Dictionary containing:
1724
+ "single":
1725
+ [*, N_res, C_s] single representation
1726
+ "pair":
1727
+ [*, N_res, N_res, C_z] pair representation
1728
+ aatype:
1729
+ [*, N_res] amino acid indices
1730
+ mask:
1731
+ Optional [*, N_res] sequence mask
1732
+ Returns:
1733
+ A dictionary of outputs
1734
+ """
1735
+ s = evoformer_output_dict["single"]
1736
+
1737
+ if mask is None:
1738
+ # [*, N]
1739
+ mask = s.new_ones(s.shape[:-1])
1740
+
1741
+ # [*, N, C_s]
1742
+ s = self.layer_norm_s(s)
1743
+
1744
+ # [*, N, N, C_z]
1745
+ z = self.layer_norm_z(evoformer_output_dict["pair"])
1746
+
1747
+ z_reference_list = None
1748
+ if _offload_inference:
1749
+ assert sys.getrefcount(evoformer_output_dict["pair"]) == 2
1750
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu()
1751
+ z_reference_list = [z]
1752
+ z = None
1753
+
1754
+ # [*, N, C_s]
1755
+ s_initial = s
1756
+ s = self.linear_in(s)
1757
+
1758
+ # [*, N]
1759
+ rigids = Rigid.identity(
1760
+ s.shape[:-1],
1761
+ s.dtype,
1762
+ s.device,
1763
+ self.training,
1764
+ fmt="quat",
1765
+ )
1766
+ outputs = []
1767
+ for i in range(self.config.num_blocks):
1768
+ # [*, N, C_s]
1769
+ s = s + self.ipa(
1770
+ s,
1771
+ z,
1772
+ rigids,
1773
+ mask,
1774
+ _offload_inference=_offload_inference,
1775
+ _z_reference_list=z_reference_list,
1776
+ )
1777
+ s = self.ipa_dropout(s)
1778
+ s = self.layer_norm_ipa(s)
1779
+ s = self.transition(s)
1780
+
1781
+ # [*, N]
1782
+ rigids = rigids.compose_q_update_vec(self.bb_update(s))
1783
+
1784
+ # To hew as closely as possible to AlphaFold, we convert our
1785
+ # quaternion-based transformations to rotation-matrix ones
1786
+ # here
1787
+ backb_to_global = Rigid(
1788
+ Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None),
1789
+ rigids.get_trans(),
1790
+ )
1791
+
1792
+ backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)
1793
+
1794
+ # [*, N, 7, 2]
1795
+ unnormalized_angles, angles = self.angle_resnet(s, s_initial)
1796
+
1797
+ all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)
1798
+
1799
+ pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
1800
+
1801
+ scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)
1802
+
1803
+ preds = {
1804
+ "frames": scaled_rigids.to_tensor_7(),
1805
+ "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
1806
+ "unnormalized_angles": unnormalized_angles,
1807
+ "angles": angles,
1808
+ "positions": pred_xyz,
1809
+ "states": s,
1810
+ }
1811
+
1812
+ outputs.append(preds)
1813
+
1814
+ rigids = rigids.stop_rot_gradient()
1815
+
1816
+ del z, z_reference_list
1817
+
1818
+ if _offload_inference:
1819
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device)
1820
+
1821
+ outputs = dict_multimap(torch.stack, outputs)
1822
+ outputs["single"] = s
1823
+
1824
+ return outputs
1825
+
1826
+ def _init_residue_constants(self, float_dtype, device):
1827
+ if not hasattr(self, "default_frames"):
1828
+ self.register_buffer(
1829
+ "default_frames",
1830
+ torch.tensor(
1831
+ residue_constants.restype_rigid_group_default_frame,
1832
+ dtype=float_dtype,
1833
+ device=device,
1834
+ requires_grad=False,
1835
+ ),
1836
+ persistent=False,
1837
+ )
1838
+ if not hasattr(self, "group_idx"):
1839
+ self.register_buffer(
1840
+ "group_idx",
1841
+ torch.tensor(
1842
+ residue_constants.restype_atom14_to_rigid_group,
1843
+ device=device,
1844
+ requires_grad=False,
1845
+ ),
1846
+ persistent=False,
1847
+ )
1848
+ if not hasattr(self, "atom_mask"):
1849
+ self.register_buffer(
1850
+ "atom_mask",
1851
+ torch.tensor(
1852
+ residue_constants.restype_atom14_mask,
1853
+ dtype=float_dtype,
1854
+ device=device,
1855
+ requires_grad=False,
1856
+ ),
1857
+ persistent=False,
1858
+ )
1859
+ if not hasattr(self, "lit_positions"):
1860
+ self.register_buffer(
1861
+ "lit_positions",
1862
+ torch.tensor(
1863
+ residue_constants.restype_atom14_rigid_group_positions,
1864
+ dtype=float_dtype,
1865
+ device=device,
1866
+ requires_grad=False,
1867
+ ),
1868
+ persistent=False,
1869
+ )
1870
+
1871
+ def torsion_angles_to_frames(self, r, alpha, f):
1872
+ # Lazily initialize the residue constants on the correct device
1873
+ self._init_residue_constants(alpha.dtype, alpha.device)
1874
+ # Separated purely to make testing less annoying
1875
+ return torsion_angles_to_frames(r, alpha, f, self.default_frames)
1876
+
1877
+ def frames_and_literature_positions_to_atom14_pos(self, r, f): # [*, N, 8] # [*, N]
1878
+ # Lazily initialize the residue constants on the correct device
1879
+ self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
1880
+ return frames_and_literature_positions_to_atom14_pos(
1881
+ r,
1882
+ f,
1883
+ self.default_frames,
1884
+ self.group_idx,
1885
+ self.atom_mask,
1886
+ self.lit_positions,
1887
+ )
1888
+
1889
+
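`_init_residue_constants` above registers the residue-constant tables lazily, as non-persistent buffers, so they are created on first use with the caller's dtype and device and never written into checkpoints. A toy sketch of the same pattern (the lookup table here is illustrative, not one of the real residue constants):

```python
import torch
from torch import nn


class LazyTableModule(nn.Module):
    """Toy module that lazily registers a lookup table as a non-persistent buffer."""

    def _init_table(self, dtype, device):
        if not hasattr(self, "table"):
            self.register_buffer(
                "table",
                torch.arange(4, dtype=dtype, device=device),
                persistent=False,  # excluded from state_dict, recreated on demand
            )

    def forward(self, idx: torch.Tensor) -> torch.Tensor:
        # The buffer is created the first time forward runs, on idx's device.
        self._init_table(torch.float32, idx.device)
        return self.table[idx]


module = LazyTableModule()
print(module(torch.tensor([0, 2])))    # tensor([0., 2.])
print("table" in module.state_dict())  # False: non-persistent buffers are not saved
```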
1890
+ class EsmFoldingTrunk(nn.Module):
1891
+ def __init__(self, config):
1892
+ super().__init__()
1893
+ self.config = config
1894
+
1895
+ c_s = config.sequence_state_dim
1896
+ c_z = config.pairwise_state_dim
1897
+
1898
+ self.pairwise_positional_embedding = EsmFoldRelativePosition(config)
1899
+
1900
+ self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)])
1901
+
1902
+ self.recycle_bins = 15
1903
+ self.recycle_s_norm = nn.LayerNorm(c_s)
1904
+ self.recycle_z_norm = nn.LayerNorm(c_z)
1905
+ self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
1906
+ self.recycle_disto.weight[0].detach().zero_()
1907
+
1908
+ self.structure_module = EsmFoldStructureModule(config.structure_module)
1909
+ self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim)
1910
+ self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim)
1911
+
1912
+ self.chunk_size = config.chunk_size
1913
+
1914
+ def set_chunk_size(self, chunk_size):
1915
+ # This parameter means the axial attention will be computed
1916
+ # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
1917
+ # It's equivalent to running a for loop over chunks of the dimension we're iterating over,
1918
+ # where the chunk_size is the size of the chunks, so 128 would mean to parse 128-length chunks.
1919
+ self.chunk_size = chunk_size
1920
+
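`set_chunk_size` only records the value; the chunking itself happens inside the attention blocks, which loop over slices of one dimension so the full quadratic intermediate never has to exist at once. A rough illustration of the idea on a generic pairwise score function (not the block's actual kernel):

```python
from typing import Optional

import torch


def pairwise_scores_chunked(q: torch.Tensor, k: torch.Tensor, chunk_size: Optional[int]) -> torch.Tensor:
    """Compute q @ k.T either in one shot or over row chunks of size `chunk_size`."""
    if chunk_size is None:
        return q @ k.T  # materializes the full [L, L] matrix at once
    out = []
    for start in range(0, q.shape[0], chunk_size):
        # Only a [chunk_size, L] slice of the score matrix is live per iteration.
        out.append(q[start : start + chunk_size] @ k.T)
    return torch.cat(out, dim=0)


q, k = torch.randn(512, 64), torch.randn(512, 64)
assert torch.allclose(pairwise_scores_chunked(q, k, None), pairwise_scores_chunked(q, k, 128), atol=1e-5)
```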
1921
+ def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
1922
+ """
1923
+ Inputs:
1924
+ seq_feats: B x L x C tensor of sequence features
+ pair_feats: B x L x L x C tensor of pair features
+ residx: B x L long tensor giving the position in the sequence
+ mask: B x L boolean tensor indicating valid residues
1926
+
1927
+ Output:
1928
+ predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object
1929
+ """
1930
+
1931
+ device = seq_feats.device
1932
+ s_s_0 = seq_feats
1933
+ s_z_0 = pair_feats
1934
+
1935
+ if no_recycles is None:
1936
+ no_recycles = self.config.max_recycles
1937
+ else:
1938
+ if no_recycles < 0:
1939
+ raise ValueError("Number of recycles must not be negative.")
1940
+ no_recycles += 1 # First 'recycle' is just the standard forward pass through the model.
1941
+
1942
+ def trunk_iter(s, z, residx, mask):
1943
+ z = z + self.pairwise_positional_embedding(residx, mask=mask)
1944
+
1945
+ for block in self.blocks:
1946
+ s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size)
1947
+ return s, z
1948
+
1949
+ s_s = s_s_0
1950
+ s_z = s_z_0
1951
+ recycle_s = torch.zeros_like(s_s)
1952
+ recycle_z = torch.zeros_like(s_z)
1953
+ recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)
1954
+
1955
+ for recycle_idx in range(no_recycles):
1956
+ with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]):
1957
+ # === Recycling ===
1958
+ recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device)
1959
+ recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device)
1960
+ recycle_z += self.recycle_disto(recycle_bins.detach()).to(device)
1961
+
1962
+ s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask)
1963
+
1964
+ # === Structure module ===
1965
+ structure = self.structure_module(
1966
+ {"single": self.trunk2sm_s(s_s), "pair": self.trunk2sm_z(s_z)},
1967
+ true_aa,
1968
+ mask.float(),
1969
+ )
1970
+
1971
+ recycle_s = s_s
1972
+ recycle_z = s_z
1973
+ # Distogram needs the N, CA, C coordinates, and bin constants same as alphafold.
1974
+ recycle_bins = EsmFoldingTrunk.distogram(
1975
+ structure["positions"][-1][:, :, :3],
1976
+ 3.375,
1977
+ 21.375,
1978
+ self.recycle_bins,
1979
+ )
1980
+
1981
+ structure["s_s"] = s_s
1982
+ structure["s_z"] = s_z
1983
+
1984
+ return structure
1985
+
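In the loop above, every recycle except the last runs inside `torch.no_grad()` via `ContextManagers`, so only the final pass contributes gradients. A stripped-down sketch of that pattern:

```python
import torch
from torch import nn

layer = nn.Linear(8, 8)
x = torch.randn(2, 8)
num_recycles = 3

state = torch.zeros_like(x)
for i in range(num_recycles + 1):
    # Earlier passes refine the state without building a graph; only the
    # final pass is differentiated, mirroring the recycling loop above.
    ctx = torch.enable_grad() if i == num_recycles else torch.no_grad()
    with ctx:
        state = layer(x + state.detach())

print(state.requires_grad)  # True: only the last iteration tracks gradients
```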
1986
+ @staticmethod
1987
+ def distogram(coords, min_bin, max_bin, num_bins):
1988
+ # Coords are [... L x 3 x 3], where it's [N, CA, C] x 3 coordinates.
1989
+ boundaries = torch.linspace(
1990
+ min_bin,
1991
+ max_bin,
1992
+ num_bins - 1,
1993
+ device=coords.device,
1994
+ )
1995
+ boundaries = boundaries**2
1996
+ N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
1997
+ # Infer CB coordinates.
1998
+ b = CA - N
1999
+ c = C - CA
2000
+ a = b.cross(c, dim=-1)
2001
+ CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
2002
+ dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True)
2003
+ bins = torch.sum(dists > boundaries, dim=-1) # [..., L, L]
2004
+ return bins
2005
+
2006
+
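`distogram` above buckets squared CB-CB distances against squared bin boundaries. A compact sketch of just the bucketing step on random coordinates (skipping the CB inference from N/CA/C that the method performs):

```python
import torch

num_bins = 15  # matches self.recycle_bins above
min_bin, max_bin = 3.375, 21.375

cb = torch.randn(1, 10, 3) * 10.0  # fake [batch, L, 3] CB coordinates

boundaries = torch.linspace(min_bin, max_bin, num_bins - 1) ** 2
sq_dists = (cb[..., None, :, :] - cb[..., :, None, :]).pow(2).sum(dim=-1, keepdim=True)
bins = torch.sum(sq_dists > boundaries, dim=-1)  # [batch, L, L] integer bin indices

print(bins.shape, int(bins.min()), int(bins.max()))  # indices lie in [0, num_bins - 1]
```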
2007
+ # TODO Add information to the docstring about any methods that convert to PDB format, or otherwise prepare
2008
+ # the outputs for downstream use.
2009
+
2010
+
2011
+ @add_start_docstrings(
2012
+ """
2013
+ ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed
2014
+ by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to
2015
+ the rest of the model combined! It outputs a dictionary containing predicted structural information about the input
2016
+ protein(s).
2017
+ """,
2018
+ ESM_START_DOCSTRING,
2019
+ )
2020
+ class EsmForProteinFolding(EsmPreTrainedModel):
2021
+ _no_split_modules = ["EsmFoldStructureModule", "EsmFoldTriangularSelfAttentionBlock"]
2022
+
2023
+ def __init__(self, config):
2024
+ super().__init__(config)
2025
+
2026
+ self.config = config
2027
+
2028
+ self.distogram_bins = 64
2029
+
2030
+ self.esm = EsmModel(config, add_pooling_layer=False)
2031
+
2032
+ self.esm.requires_grad_(False)
2033
+ if self.config.esmfold_config.fp16_esm:
2034
+ self.esm.half()
2035
+
2036
+ self.esm_feats = self.config.hidden_size
2037
+ self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads
2038
+ self.esm_layers = self.config.num_hidden_layers
2039
+ self.register_buffer("af2_to_esm", self._af2_to_esm_from_vocab_list(config.vocab_list))
2040
+ self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1))
2041
+
2042
+ trunk_config = self.config.esmfold_config.trunk
2043
+ c_s = trunk_config.sequence_state_dim
2044
+ c_z = trunk_config.pairwise_state_dim
2045
+ self.esm_s_mlp = nn.Sequential(
2046
+ LayerNorm(self.esm_feats),
2047
+ nn.Linear(self.esm_feats, c_s),
2048
+ nn.ReLU(),
2049
+ nn.Linear(c_s, c_s),
2050
+ )
2051
+
2052
+ # 0 is padding, N is unknown residues, N + 1 is mask.
2053
+ self.n_tokens_embed = residue_constants.restype_num + 3
2054
+ self.pad_idx = 0
2055
+ self.unk_idx = self.n_tokens_embed - 2
2056
+ self.mask_idx = self.n_tokens_embed - 1
2057
+ self.esm_dict_cls_idx = self.config.vocab_list.index("<cls>")
2058
+ self.esm_dict_mask_idx = self.config.vocab_list.index("<mask>")
2059
+ self.esm_dict_eos_idx = self.config.vocab_list.index("<eos>")
2060
+ self.esm_dict_padding_idx = self.config.vocab_list.index("<pad>")
2061
+ if self.config.esmfold_config.embed_aa:
2062
+ self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)
2063
+
2064
+ self.trunk = EsmFoldingTrunk(trunk_config)
2065
+
2066
+ self.distogram_head = nn.Linear(c_z, self.distogram_bins)
2067
+ self.ptm_head = nn.Linear(c_z, self.distogram_bins)
2068
+ self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
2069
+ self.lddt_bins = 50
2070
+ structure_module_config = trunk_config.structure_module
2071
+ self.lddt_head = nn.Sequential(
2072
+ nn.LayerNorm(structure_module_config.sequence_dim),
2073
+ nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim),
2074
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim),
2075
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins),
2076
+ )
2077
+
2078
+ @staticmethod
2079
+ def _af2_to_esm_from_vocab_list(vocab_list: List[str]) -> torch.Tensor:
2080
+ # Remember that t is shifted from residue_constants by 1 (0 is padding).
2081
+ esm_reorder = [vocab_list.index("<pad>")] + [vocab_list.index(v) for v in residue_constants.restypes_with_x]
2082
+ return torch.tensor(esm_reorder)
2083
+
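`_af2_to_esm_from_vocab_list` builds a lookup tensor so AF2-style residue indices can be remapped to ESM vocabulary ids with a single indexing operation, after shifting by one so that 0 stays padding (see `af2_idx_to_esm_idx` further down). A toy version with a made-up vocabulary:

```python
import torch

# Hypothetical tiny vocab: index of each symbol in the "ESM-style" vocabulary.
vocab_list = ["<cls>", "<pad>", "A", "G", "<eos>"]
restypes = ["A", "G"]  # "AF2-style" alphabet, indexed 0..N-1

# Position 0 maps padding, positions 1.. map the shifted residue indices.
reorder = torch.tensor([vocab_list.index("<pad>")] + [vocab_list.index(r) for r in restypes])

aa = torch.tensor([[0, 1, 1]])    # AF2 indices
mask = torch.tensor([[1, 1, 0]])  # last position is padding
esm_ids = reorder[(aa + 1).masked_fill(mask != 1, 0)]
print(esm_ids)  # tensor([[2, 3, 1]]) -> "A", "G", "<pad>"
```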
2084
+ @add_start_docstrings_to_model_forward(ESMFOLD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
2085
+ @replace_return_docstrings(output_type=EsmForProteinFoldingOutput, config_class=EsmConfig)
2086
+ def forward(
2087
+ self,
2088
+ input_ids: torch.Tensor,
2089
+ attention_mask: Optional[torch.Tensor] = None,
2090
+ position_ids: Optional[torch.Tensor] = None,
2091
+ masking_pattern: Optional[torch.Tensor] = None,
2092
+ num_recycles: Optional[int] = None,
2093
+ ) -> EsmForProteinFoldingOutput:
2094
+ r"""
2095
+ Returns:
2096
+
2097
+ Example:
2098
+
2099
+ ```python
2100
+ >>> from transformers import AutoTokenizer, EsmForProteinFolding
2101
+
2102
+ >>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
2103
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
2104
+ >>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide
2105
+ >>> outputs = model(**inputs)
2106
+ >>> folded_positions = outputs.positions
2107
+ ```
2108
+
2109
+ """
2110
+ cfg = self.config.esmfold_config
2111
+
2112
+ aa = input_ids # B x L
2113
+ B = aa.shape[0]
2114
+ L = aa.shape[1]
2115
+ device = input_ids.device
2116
+ if attention_mask is None:
2117
+ attention_mask = torch.ones_like(aa, device=device)
2118
+ if position_ids is None:
2119
+ position_ids = torch.arange(L, device=device).expand_as(input_ids)
2120
+
2121
+ # === ESM ===
2122
+ esmaa = self.af2_idx_to_esm_idx(aa, attention_mask)
2123
+
2124
+ if masking_pattern is not None:
2125
+ masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern)
2126
+ else:
2127
+ masked_aa = aa
2128
+ mlm_targets = None
2129
+
2130
+ # We get sequence and pair representations from whatever version of ESM /
2131
+ # configuration we are using. The sequence representation esm_s is always
2132
+ # present. The pair embedding esm_z may be present depending on the
2133
+ # configuration of the model. If esm_z is not used by the model then it
2134
+ # is returned as None here.
2135
+ esm_s = self.compute_language_model_representations(esmaa)
2136
+
2137
+ # Convert esm_s and esm_z, if present, to the precision used by the trunk and
2138
+ # the structure module. These tensors may be a lower precision if, for example,
2139
+ # we're running the language model in fp16 precision.
2140
+ esm_s = esm_s.to(self.esm_s_combine.dtype)
2141
+
2142
+ if cfg.esm_ablate_sequence:
2143
+ esm_s = esm_s * 0
2144
+
2145
+ esm_s = esm_s.detach()
2146
+
2147
+ # === preprocessing ===
2148
+ esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
2149
+ s_s_0 = self.esm_s_mlp(esm_s)
2150
+
2151
+ s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim)
2152
+
2153
+ if self.config.esmfold_config.embed_aa:
2154
+ s_s_0 += self.embedding(masked_aa)
2155
+
2156
+ structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
2157
+ # Documenting what we expect:
2158
+ structure = {
2159
+ k: v
2160
+ for k, v in structure.items()
2161
+ if k
2162
+ in [
2163
+ "s_z",
2164
+ "s_s",
2165
+ "frames",
2166
+ "sidechain_frames",
2167
+ "unnormalized_angles",
2168
+ "angles",
2169
+ "positions",
2170
+ "states",
2171
+ ]
2172
+ }
2173
+
2174
+ # Add BERT mask for the loss to use, if available.
2175
+ if mlm_targets is not None:
2176
+ structure["mlm_targets"] = mlm_targets
2177
+
2178
+ disto_logits = self.distogram_head(structure["s_z"])
2179
+ disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
2180
+ structure["distogram_logits"] = disto_logits
2181
+
2182
+ lm_logits = self.lm_head(structure["s_s"])
2183
+ structure["lm_logits"] = lm_logits
2184
+
2185
+ structure["aatype"] = aa
2186
+ make_atom14_masks(structure)
2187
+ # Of course, this doesn't respect the true mask because it doesn't know about it...
2188
+ # We're not going to properly mask change of index tensors:
2189
+ # "residx_atom14_to_atom37",
2190
+ # "residx_atom37_to_atom14",
2191
+ for k in [
2192
+ "atom14_atom_exists",
2193
+ "atom37_atom_exists",
2194
+ ]:
2195
+ structure[k] *= attention_mask.unsqueeze(-1)
2196
+ structure["residue_index"] = position_ids
2197
+
2198
+ lddt_head = self.lddt_head(structure["states"]).reshape(structure["states"].shape[0], B, L, -1, self.lddt_bins)
2199
+ structure["lddt_head"] = lddt_head
2200
+ plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
2201
+ structure["plddt"] = plddt
2202
+
2203
+ ptm_logits = self.ptm_head(structure["s_z"])
2204
+ structure["ptm_logits"] = ptm_logits
2205
+ structure["ptm"] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins)
2206
+ structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins))
2207
+
2208
+ return EsmForProteinFoldingOutput(**structure)
2209
+
2210
+ def af2_idx_to_esm_idx(self, aa, mask):
2211
+ # avoid indexing on different devices
2212
+ if self.af2_to_esm.device != aa.device:
2213
+ self.af2_to_esm = self.af2_to_esm.to(aa.device)
2214
+ aa = (aa + 1).masked_fill(mask != 1, 0)
2215
+ return self.af2_to_esm[aa]
2216
+
2217
+ def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
2218
+ device = next(self.parameters()).device
2219
+ B, L = esmaa.shape # B = batch size, L = sequence length.
2220
+
2221
+ if self.config.esmfold_config.bypass_lm:
2222
+ esm_s = torch.zeros(B, L, self.esm_s_combine.size(0), self.esm_feats, device=device)
2223
+ return esm_s
2224
+
2225
+ bosi, eosi = self.esm_dict_cls_idx, self.esm_dict_eos_idx
2226
+ bos = esmaa.new_full((B, 1), bosi)
2227
+ eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx)
2228
+ esmaa = torch.cat([bos, esmaa, eos], dim=1)
2229
+ # Use the first padding index as eos during inference.
2230
+ esmaa[range(B), (esmaa != 1).sum(1)] = eosi
2231
+
2232
+ # _, esm_z, esm_s = self.esm(esmaa, return_pairs=self.config.esmfold_config.use_esm_attn_map)
2233
+ # Because we do not support use_esm_attn_map in the HF port as it is not used in any public models,
2234
+ # esm_z is always None
2235
+ esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)["hidden_states"]
2236
+ esm_s = torch.stack(esm_hidden_states, dim=2)
2237
+
2238
+ esm_s = esm_s[:, 1:-1] # B, L, nLayers, C
2239
+
2240
+ return esm_s
2241
+
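The stacked per-layer hidden states returned here are later collapsed in `forward` by a learned softmax over layers (`esm_s_combine.softmax(0).unsqueeze(0) @ esm_s`). A small sketch of that weighted combination with dummy shapes:

```python
import torch
from torch import nn

B, L, n_layers, C = 2, 5, 7, 16
esm_s = torch.randn(B, L, n_layers, C)         # stacked per-layer hidden states
combine = nn.Parameter(torch.zeros(n_layers))  # one learnable weight per layer

# [1, n_layers] @ [B, L, n_layers, C] -> [B, L, 1, C]; squeeze the singleton dim.
weights = combine.softmax(0).unsqueeze(0)
mixed = (weights @ esm_s).squeeze(2)
print(mixed.shape)  # torch.Size([2, 5, 16])
```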
2242
+ def bert_mask(self, aa, esmaa, mask, pattern):
2243
+ new_aa = aa.clone()
2244
+ target = aa.clone()
2245
+ new_esmaa = esmaa.clone()
2246
+ new_aa[pattern == 1] = self.mask_idx
2247
+ target[pattern != 1] = 0
2248
+ new_esmaa[pattern == 1] = self.esm_dict_mask_idx
2249
+ return new_aa, new_esmaa, target
2250
+
2251
+ @torch.no_grad()
2252
+ def infer(
2253
+ self,
2254
+ seqs: Union[str, List[str]],
2255
+ position_ids=None,
2256
+ ):
2257
+ if isinstance(seqs, str):
2258
+ lst = [seqs]
2259
+ else:
2260
+ lst = seqs
2261
+ # Returns the raw outputs of the model given an input sequence.
2262
+ device = next(self.parameters()).device
2263
+ aatype = collate_dense_tensors(
2264
+ [
2265
+ torch.from_numpy(
2266
+ residue_constants.sequence_to_onehot(
2267
+ sequence=seq,
2268
+ mapping=residue_constants.restype_order_with_x,
2269
+ map_unknown_to_x=True,
2270
+ )
2271
+ )
2272
+ .to(device)
2273
+ .argmax(dim=1)
2274
+ for seq in lst
2275
+ ]
2276
+ ) # B=1 x L
2277
+ mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst])
2278
+ position_ids = (
2279
+ torch.arange(aatype.shape[1], device=device).expand(len(lst), -1)
2280
+ if position_ids is None
2281
+ else position_ids.to(device)
2282
+ )
2283
+ if position_ids.ndim == 1:
2284
+ position_ids = position_ids.unsqueeze(0)
2285
+ return self.forward(
2286
+ aatype,
2287
+ mask,
2288
+ position_ids=position_ids,
2289
+ )
2290
+
2291
+ @staticmethod
2292
+ def output_to_pdb(output: Dict) -> List[str]:
2293
+ """Returns the pbd (file) string from the model given the model output."""
2294
+ output = {k: v.to("cpu").numpy() for k, v in output.items()}
2295
+ pdbs = []
2296
+ final_atom_positions = atom14_to_atom37(output["positions"][-1], output)
2297
+ final_atom_mask = output["atom37_atom_exists"]
2298
+ for i in range(output["aatype"].shape[0]):
2299
+ aa = output["aatype"][i]
2300
+ pred_pos = final_atom_positions[i]
2301
+ mask = final_atom_mask[i]
2302
+ resid = output["residue_index"][i] + 1
2303
+ pred = OFProtein(
2304
+ aatype=aa,
2305
+ atom_positions=pred_pos,
2306
+ atom_mask=mask,
2307
+ residue_index=resid,
2308
+ b_factors=output["plddt"][i],
2309
+ )
2310
+ pdbs.append(to_pdb(pred))
2311
+ return pdbs
2312
+
2313
+ def infer_pdb(self, seqs, *args, **kwargs) -> str:
2314
+ """Returns the pdb (file) string from the model given an input sequence."""
2315
+ assert isinstance(seqs, str)
2316
+ output = self.infer(seqs, *args, **kwargs)
2317
+ return self.output_to_pdb(output)[0]
2318
+
2319
+ def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]:
2320
+ """Returns the pdb (file) string from the model given an input sequence."""
2321
+ output = self.infer(seqs, *args, **kwargs)
2322
+ return self.output_to_pdb(output)
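A short usage sketch of the inference helpers above, reusing the `facebook/esmfold_v1` checkpoint from the forward docstring; the output path is hypothetical:

```python
from transformers import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
model.eval()

# infer_pdb runs a full forward pass under torch.no_grad() and converts the
# predicted atom positions into a PDB-format string.
pdb_string = model.infer_pdb("MLKNVQVQLV")

with open("prediction.pdb", "w") as f:  # hypothetical output path
    f.write(pdb_string)
```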
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py ADDED
@@ -0,0 +1,1567 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ESM model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import os
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutputWithPastAndCrossAttentions,
29
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
30
+ TFMaskedLMOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFSequenceClassificationLoss,
39
+ TFTokenClassificationLoss,
40
+ get_initializer,
41
+ keras,
42
+ shape_list,
43
+ unpack_inputs,
44
+ )
45
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
46
+ from ...utils import logging
47
+ from .configuration_esm import EsmConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
53
+ _CONFIG_FOR_DOC = "EsmConfig"
54
+
55
+
56
+ def rotate_half(x):
57
+ x1, x2 = tf.split(x, 2, axis=-1)
58
+ return tf.concat((-x2, x1), axis=-1)
59
+
60
+
61
+ def apply_rotary_pos_emb(x, cos, sin):
62
+ cos = cos[:, :, : tf.shape(x)[-2], :]
63
+ sin = sin[:, :, : tf.shape(x)[-2], :]
64
+
65
+ return (x * cos) + (rotate_half(x) * sin)
66
+
67
+
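`apply_rotary_pos_emb` rotates pairs of channels by position-dependent angles; rotating queries and keys the same way makes their dot products depend on relative position. A quick sketch that reuses the helpers above and builds the cos/sin tables the way `TFRotaryEmbedding` does:

```python
import tensorflow as tf

batch, heads, seq_len, dim = 1, 2, 6, 8

q = tf.random.normal((batch, heads, seq_len, dim))
k = tf.random.normal((batch, heads, seq_len, dim))

# Same frequency schedule as TFRotaryEmbedding below.
inv_freq = 1.0 / (10000 ** (tf.range(0, dim, 2, dtype=tf.float32) / dim))
t = tf.range(seq_len, dtype=tf.float32)
freqs = tf.einsum("i,j->ij", t, inv_freq)
emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :]

q_rot = apply_rotary_pos_emb(q, tf.cos(emb), tf.sin(emb))
k_rot = apply_rotary_pos_emb(k, tf.cos(emb), tf.sin(emb))
print(q_rot.shape, k_rot.shape)  # both keep the (batch, heads, seq_len, dim) shape
```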
68
+ def symmetrize(x):
69
+ "Make layer symmetric in final two dimensions, used for contact prediction."
70
+ return x + tf.linalg.matrix_transpose(x) # Transposes last two dimensions only
71
+
72
+
73
+ def average_product_correct(x):
74
+ "Perform average product correct, used for contact prediction."
75
+ a1 = tf.reduce_sum(x, -1, keepdims=True)
76
+ a2 = tf.reduce_sum(x, -2, keepdims=True)
77
+ a12 = tf.reduce_sum(x, (-1, -2), keepdims=True)
78
+
79
+ avg = a1 * a2
80
+ avg = avg / a12
81
+ normalized = x - avg
82
+ return normalized
83
+
84
+
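`symmetrize` and `average_product_correct` are the standard preprocessing applied to stacked attention maps before the contact-prediction regression. A tiny check of their basic properties, reusing the two functions defined above:

```python
import tensorflow as tf

x = tf.random.normal((1, 4, 5, 5))  # batch x channels x tokens x tokens

sym = symmetrize(x)
apc = average_product_correct(sym)

# Symmetry in the last two dimensions is preserved by both steps.
print(bool(tf.reduce_all(tf.abs(sym - tf.linalg.matrix_transpose(sym)) < 1e-5)))
print(bool(tf.reduce_all(tf.abs(apc - tf.linalg.matrix_transpose(apc)) < 1e-5)))
```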
85
+ class TFRotaryEmbedding(keras.layers.Layer):
86
+ """
87
+ Rotary position embeddings based on those in
88
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
89
+ matrices which depend on their relative positions.
90
+ """
91
+
92
+ def __init__(self, dim: int, name=None):
93
+ super().__init__(name=name)
94
+ # Matt: The PyTorch version of this layer does a lot of work to cache values, but we just rely on TF compilation
95
+ # and/or XLA to sort out constants like that. It actually may not seem like this layer needs to be stateful at
96
+ # all when we benefit from TF compilation, but it does. The reason is that self.inv_freq is a buffer in the
97
+ # original implementation, but all the shared ESM checkpoints were trained with fp16 params. This means that
98
+ # the inv_freq tensor was stored as a float16, and we need to replicate those lower-precision values or our
99
+ # models give different outputs from the original.
100
+ self.dim = dim
101
+
102
+ def build(self, input_shape):
103
+ super().build(input_shape)
104
+ self.inv_freq = self.add_weight(
105
+ "inv_freq", shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False
106
+ )
107
+ self.inv_freq.assign(
108
+ 1.0 / (10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim))
109
+ )
110
+
111
+ def _compute_cos_sin(self, x, seq_dimension=2):
112
+ seq_len = tf.shape(x)[seq_dimension]
113
+
114
+ t = tf.range(seq_len, dtype=self.inv_freq.dtype)
115
+ freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication
116
+ emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :]
117
+
118
+ return tf.cos(emb), tf.sin(emb)
119
+
120
+ def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
121
+ cos_emb, sin_emb = self._compute_cos_sin(k, seq_dimension=-2)
122
+
123
+ return (
124
+ apply_rotary_pos_emb(q, cos_emb, sin_emb),
125
+ apply_rotary_pos_emb(k, cos_emb, sin_emb),
126
+ )
127
+
128
+
129
+ class TFEsmContactPredictionHead(keras.layers.Layer):
130
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
131
+
132
+ def __init__(
133
+ self,
134
+ in_features: int,
135
+ bias=True,
136
+ eos_idx: int = 2,
137
+ name=None,
138
+ ):
139
+ super().__init__(name=name)
140
+ self.eos_idx = eos_idx
141
+ self.in_features = in_features
142
+ self.regression = keras.layers.Dense(1, use_bias=bias, activation="sigmoid", name="regression")
143
+
144
+ def build(self, input_shape=None):
145
+ if self.built:
146
+ return
147
+ self.built = True
148
+ if getattr(self, "regression", None) is not None:
149
+ with tf.name_scope(self.regression.name):
150
+ self.regression.build((None, self.in_features))
151
+
152
+ def call(self, tokens, attentions):
153
+ # remove eos token attentions
154
+ eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype)
155
+ eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2)
156
+ attentions = attentions * eos_mask[:, None, None, :, :]
157
+ attentions = attentions[..., :-1, :-1]
158
+ # remove cls token attentions
159
+ attentions = attentions[..., 1:, 1:]
160
+ batch_size, layers, heads, seqlen, _ = shape_list(attentions)
161
+ attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen))
162
+
163
+ # features: batch x channels x tokens x tokens (symmetric)
164
+ attentions = average_product_correct(symmetrize(attentions))
165
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
166
+ return tf.squeeze(self.regression(attentions), 3)
167
+
168
+
169
+ class TFEsmEmbeddings(keras.layers.Layer):
170
+ """
171
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
172
+ """
173
+
174
+ def __init__(self, config, name=None):
175
+ super().__init__(name=name)
176
+ self.word_embeddings = keras.layers.Embedding(
177
+ config.vocab_size,
178
+ config.hidden_size,
179
+ embeddings_initializer=get_initializer(config.initializer_range),
180
+ name="word_embeddings",
181
+ )
182
+ self.position_embeddings = keras.layers.Embedding(
183
+ config.max_position_embeddings,
184
+ config.hidden_size,
185
+ embeddings_initializer=get_initializer(config.initializer_range),
186
+ name="position_embeddings",
187
+ )
188
+
189
+ if config.emb_layer_norm_before:
190
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
191
+ else:
192
+ self.layer_norm = None
193
+ # Matt: I think this line was copied incorrectly from BERT, disabling for now
194
+ # self.dropout = Dropout(config.hidden_dropout_prob)
195
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
196
+
197
+ self.position_ids = tf.range(config.max_position_embeddings)[None, :]
198
+
199
+ self.padding_idx = config.pad_token_id
200
+ self.token_dropout = config.token_dropout
201
+ self.mask_token_id = config.mask_token_id
202
+ self.config = config
203
+
204
+ def call(
205
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
206
+ ):
207
+ if position_ids is None:
208
+ if input_ids is not None:
209
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
210
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
211
+ else:
212
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
213
+
214
+ if inputs_embeds is None:
215
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
216
+ inputs_embeds = self.word_embeddings(input_ids)
217
+
218
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
219
+ # embedding_scale factor here.
220
+ embeddings = inputs_embeds
221
+
222
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
223
+ # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
224
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
225
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
226
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
227
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
228
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
229
+ if self.token_dropout:
230
+ embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings)
231
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
232
+ src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32)
233
+ masked_tokens = input_ids == self.mask_token_id
234
+ mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths
235
+ embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
236
+
237
+ if self.position_embedding_type == "absolute":
238
+ position_embeddings = self.position_embeddings(position_ids)
239
+ embeddings += position_embeddings
240
+
241
+ if self.layer_norm is not None:
242
+ embeddings = self.layer_norm(embeddings)
243
+ if attention_mask is not None:
244
+ embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype)
245
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
246
+ # embeddings = self.dropout(embeddings)
247
+ return embeddings
248
+
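The token-dropout comment above describes zeroing `<mask>` embeddings and rescaling the rest by (1 - mask_ratio_train) / (1 - mask_ratio_observed). A plain-numbers sketch of that scale factor (only the hard-coded 0.15 * 0.8 training ratio comes from the code; the counts are illustrative):

```python
mask_ratio_train = 0.15 * 0.8  # fraction of tokens zeroed out during ESM training


def token_dropout_scale(num_masked: int, seq_len: int) -> float:
    """Scale applied to every embedding of a sequence containing `num_masked` <mask> tokens."""
    mask_ratio_observed = num_masked / seq_len
    return (1 - mask_ratio_train) / (1 - mask_ratio_observed)


print(token_dropout_scale(0, 100))   # ~0.88  -> unmasked sequences are scaled down
print(token_dropout_scale(12, 100))  # ~1.0   -> matches the training-time ratio
print(token_dropout_scale(30, 100))  # ~1.257 -> heavily masked sequences are scaled up
```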
249
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
250
+ """
251
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
252
+
253
+ Args:
254
+ inputs_embeds: tf.Tensor
255
+
256
+ Returns: tf.Tensor
257
+ """
258
+ input_shape = shape_list(inputs_embeds)[:-1]
259
+ sequence_length = input_shape[1]
260
+
261
+ position_ids = tf.range(
262
+ start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64
263
+ )
264
+ return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape)
265
+
266
+ def build(self, input_shape=None):
267
+ if self.built:
268
+ return
269
+ self.built = True
270
+ if getattr(self, "word_embeddings", None) is not None:
271
+ with tf.name_scope(self.word_embeddings.name):
272
+ self.word_embeddings.build(None)
273
+ if getattr(self, "position_embeddings", None) is not None:
274
+ with tf.name_scope(self.position_embeddings.name):
275
+ self.position_embeddings.build(None)
276
+ if getattr(self, "layer_norm", None) is not None:
277
+ with tf.name_scope(self.layer_norm.name):
278
+ self.layer_norm.build([None, None, self.config.hidden_size])
279
+
280
+
281
+ class TFEsmSelfAttention(keras.layers.Layer):
282
+ def __init__(self, config, position_embedding_type=None, name=None):
283
+ super().__init__(name=name)
284
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
285
+ raise ValueError(
286
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
287
+ f"heads ({config.num_attention_heads})"
288
+ )
289
+
290
+ self.num_attention_heads = config.num_attention_heads
291
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
292
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
293
+
294
+ self.query = keras.layers.Dense(
295
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
296
+ )
297
+ self.key = keras.layers.Dense(
298
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
299
+ )
300
+ self.value = keras.layers.Dense(
301
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
302
+ )
303
+
304
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
305
+ self.position_embedding_type = position_embedding_type or getattr(
306
+ config, "position_embedding_type", "absolute"
307
+ )
308
+ self.rotary_embeddings = None
309
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
310
+ self.max_position_embeddings = config.max_position_embeddings
311
+ self.distance_embedding = keras.layers.Embedding(
312
+ 2 * config.max_position_embeddings - 1,
313
+ self.attention_head_size,
314
+ embeddings_initializer=get_initializer(config.initializer_range),
315
+ )
316
+ elif self.position_embedding_type == "rotary":
317
+ self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name="rotary_embeddings")
318
+
319
+ self.is_decoder = config.is_decoder
320
+ self.config = config
321
+
322
+ def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
323
+ new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size]
324
+ x = tf.reshape(x, new_x_shape)
325
+ return tf.transpose(x, perm=(0, 2, 1, 3))
326
+
327
+ def call(
328
+ self,
329
+ hidden_states: tf.Tensor,
330
+ attention_mask: tf.Tensor | None = None,
331
+ head_mask: tf.Tensor | None = None,
332
+ encoder_hidden_states: tf.Tensor | None = None,
333
+ encoder_attention_mask: tf.Tensor | None = None,
334
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
335
+ output_attentions: Optional[bool] = False,
336
+ training: bool = False,
337
+ ) -> Tuple[tf.Tensor]:
338
+ mixed_query_layer = self.query(hidden_states)
339
+
340
+ # If this is instantiated as a cross-attention module, the keys
341
+ # and values come from an encoder; the attention mask needs to be
342
+ # such that the encoder's padding tokens are not attended to.
343
+ is_cross_attention = encoder_hidden_states is not None
344
+
345
+ if is_cross_attention and past_key_value is not None:
346
+ # reuse k,v, cross_attentions
347
+ key_layer = past_key_value[0]
348
+ value_layer = past_key_value[1]
349
+ attention_mask = encoder_attention_mask
350
+ elif is_cross_attention:
351
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
352
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
353
+ attention_mask = encoder_attention_mask
354
+ elif past_key_value is not None:
355
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
356
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
357
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
358
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
359
+ else:
360
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
361
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
362
+
363
+ query_layer = self.transpose_for_scores(mixed_query_layer)
364
+
365
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
366
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
367
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
368
+ # ESM code and fix rotary embeddings.
369
+ query_layer = query_layer * self.attention_head_size**-0.5
370
+
371
+ if self.is_decoder:
372
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
373
+ # Further calls to cross_attention layer can then reuse all cross-attention
374
+ # key/value_states (first "if" case)
375
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
376
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
377
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
378
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
379
+ past_key_value = (key_layer, value_layer)
380
+
381
+ if self.position_embedding_type == "rotary":
382
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
383
+
384
+ # Take the dot product between "query" and "key" to get the raw attention scores.
385
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
386
+
387
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
388
+ seq_length = shape_list(hidden_states)[1]
389
+ position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1)
390
+ position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0)
391
+ distance = position_ids_l - position_ids_r
392
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
393
+ positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility
394
+
395
+ if self.position_embedding_type == "relative_key":
396
+ relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
397
+ attention_scores = attention_scores + relative_position_scores
398
+ elif self.position_embedding_type == "relative_key_query":
399
+ relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
400
+ relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
401
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
402
+
403
+ if attention_mask is not None:
404
+ # Apply the attention mask is (precomputed for all layers in EsmModel forward() function)
405
+ attention_scores = attention_scores + attention_mask
406
+
407
+ # Normalize the attention scores to probabilities.
408
+ attention_probs = stable_softmax(attention_scores, axis=-1)
409
+
410
+ # This is actually dropping out entire tokens to attend to, which might
411
+ # seem a bit unusual, but is taken from the original Transformer paper.
412
+ attention_probs = self.dropout(attention_probs, training=training)
413
+
414
+ # Mask heads if we want to
415
+ if head_mask is not None:
416
+ attention_probs = attention_probs * head_mask
417
+
418
+ context_layer = attention_probs @ value_layer
419
+
420
+ context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3))
421
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size]
422
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
423
+
424
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
425
+
426
+ if self.is_decoder:
427
+ outputs = outputs + (past_key_value,)
428
+ return outputs
429
+
430
+ def build(self, input_shape=None):
431
+ if self.built:
432
+ return
433
+ self.built = True
434
+ if getattr(self, "query", None) is not None:
435
+ with tf.name_scope(self.query.name):
436
+ self.query.build([None, None, self.config.hidden_size])
437
+ if getattr(self, "key", None) is not None:
438
+ with tf.name_scope(self.key.name):
439
+ self.key.build([None, None, self.config.hidden_size])
440
+ if getattr(self, "value", None) is not None:
441
+ with tf.name_scope(self.value.name):
442
+ self.value.build([None, None, self.config.hidden_size])
443
+ if getattr(self, "rotary_embeddings", None) is not None:
444
+ with tf.name_scope(self.rotary_embeddings.name):
445
+ self.rotary_embeddings.build(None)
446
+
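The comment in `call` above notes that scaling the query by 1/sqrt(head_dim) before the dot product matches scaling the logits afterwards, up to numerics, which is why the two variants only diverge once rotary embeddings are applied in between. Without rotary embeddings the equivalence is easy to check:

```python
import tensorflow as tf

head_dim = 16
q = tf.random.normal((1, 2, 6, head_dim))
k = tf.random.normal((1, 2, 6, head_dim))

# Scale the query first (ESM style) vs. scale the logits afterwards (BERT style).
scores_scaled_query = tf.matmul(q * head_dim**-0.5, k, transpose_b=True)
scores_scaled_logits = tf.matmul(q, k, transpose_b=True) * head_dim**-0.5

print(bool(tf.reduce_all(tf.abs(scores_scaled_query - scores_scaled_logits) < 1e-5)))
```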
447
+
448
+ class TFEsmSelfOutput(keras.layers.Layer):
449
+ def __init__(self, config, name=None):
450
+ super().__init__(name=name)
451
+ self.dense = keras.layers.Dense(
452
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
453
+ )
454
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
455
+ self.config = config
456
+
457
+ def call(self, hidden_states, input_tensor, training=False):
458
+ hidden_states = self.dense(hidden_states)
459
+ hidden_states = self.dropout(hidden_states, training=training)
460
+ hidden_states += input_tensor
461
+ return hidden_states
462
+
463
+ def build(self, input_shape=None):
464
+ if self.built:
465
+ return
466
+ self.built = True
467
+ if getattr(self, "dense", None) is not None:
468
+ with tf.name_scope(self.dense.name):
469
+ self.dense.build([None, None, self.config.hidden_size])
470
+
471
+
472
+ class TFEsmAttention(keras.layers.Layer):
473
+ def __init__(self, config, name=None):
474
+ super().__init__(name=name)
475
+ self.self = TFEsmSelfAttention(config, name="self")
476
+ self.output_layer = TFEsmSelfOutput(config, name="output")
477
+ self.pruned_heads = set()
478
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
479
+ self.config = config
480
+
481
+ def prune_heads(self, heads):
482
+ raise NotImplementedError
483
+
484
+ def call(
485
+ self,
486
+ hidden_states,
487
+ attention_mask=None,
488
+ head_mask=None,
489
+ encoder_hidden_states=None,
490
+ encoder_attention_mask=None,
491
+ past_key_value=None,
492
+ output_attentions=False,
493
+ training=False,
494
+ ):
495
+ hidden_states_ln = self.LayerNorm(hidden_states)
496
+ self_outputs = self.self(
497
+ hidden_states_ln,
498
+ attention_mask,
499
+ head_mask,
500
+ encoder_hidden_states,
501
+ encoder_attention_mask,
502
+ past_key_value,
503
+ output_attentions,
504
+ training,
505
+ )
506
+ attention_output = self.output_layer(self_outputs[0], hidden_states)
507
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
508
+ return outputs
509
+
510
+ def build(self, input_shape=None):
511
+ if self.built:
512
+ return
513
+ self.built = True
514
+ if getattr(self, "self", None) is not None:
515
+ with tf.name_scope(self.self.name):
516
+ self.self.build(None)
517
+ if getattr(self, "output_layer", None) is not None:
518
+ with tf.name_scope(self.output_layer.name):
519
+ self.output_layer.build(None)
520
+ if getattr(self, "LayerNorm", None) is not None:
521
+ with tf.name_scope(self.LayerNorm.name):
522
+ self.LayerNorm.build([None, None, self.config.hidden_size])
523
+
524
+
525
+ class TFEsmIntermediate(keras.layers.Layer):
526
+ def __init__(self, config: EsmConfig, **kwargs):
527
+ super().__init__(**kwargs)
528
+
529
+ self.dense = keras.layers.Dense(
530
+ units=config.intermediate_size,
531
+ kernel_initializer=get_initializer(config.initializer_range),
532
+ name="dense",
533
+ )
534
+ self.config = config
535
+
536
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
537
+ hidden_states = self.dense(inputs=hidden_states)
538
+ hidden_states = tf.nn.gelu(hidden_states)
539
+ return hidden_states
540
+
541
+ def build(self, input_shape=None):
542
+ if self.built:
543
+ return
544
+ self.built = True
545
+ if getattr(self, "dense", None) is not None:
546
+ with tf.name_scope(self.dense.name):
547
+ self.dense.build([None, None, self.config.hidden_size])
548
+
549
+
550
+ class TFEsmOutput(keras.layers.Layer):
551
+ def __init__(self, config, name=None):
552
+ super().__init__(name=name)
553
+ self.dense = keras.layers.Dense(
554
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
555
+ )
556
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
557
+ self.config = config
558
+
559
+ def call(self, hidden_states, input_tensor, training=False):
560
+ hidden_states = self.dense(hidden_states)
561
+ hidden_states = self.dropout(hidden_states, training=training)
562
+ hidden_states += input_tensor
563
+ return hidden_states
564
+
565
+ def build(self, input_shape=None):
566
+ if self.built:
567
+ return
568
+ self.built = True
569
+ if getattr(self, "dense", None) is not None:
570
+ with tf.name_scope(self.dense.name):
571
+ self.dense.build([None, None, self.config.intermediate_size])
572
+
573
+
574
+ class TFEsmLayer(keras.layers.Layer):
575
+ def __init__(self, config, name=None):
576
+ super().__init__(name=name)
577
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
578
+ self.seq_len_dim = 1
579
+ self.attention = TFEsmAttention(config, name="attention")
580
+ self.is_decoder = config.is_decoder
581
+ self.add_cross_attention = config.add_cross_attention
582
+ if self.add_cross_attention:
583
+ if not self.is_decoder:
584
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
585
+ self.crossattention = TFEsmAttention(config)
586
+ self.intermediate = TFEsmIntermediate(config, name="intermediate")
587
+ self.output_layer = TFEsmOutput(config, name="output")
588
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
589
+ self.config = config
590
+
591
+ def call(
592
+ self,
593
+ hidden_states,
594
+ attention_mask=None,
595
+ head_mask=None,
596
+ encoder_hidden_states=None,
597
+ encoder_attention_mask=None,
598
+ past_key_value=None,
599
+ output_attentions=False,
600
+ training=False,
601
+ ):
602
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
603
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
604
+ self_attention_outputs = self.attention(
605
+ hidden_states,
606
+ attention_mask,
607
+ head_mask,
608
+ output_attentions=output_attentions,
609
+ past_key_value=self_attn_past_key_value,
610
+ training=training,
611
+ )
612
+ attention_output = self_attention_outputs[0]
613
+
614
+ # if decoder, the last output is tuple of self-attn cache
615
+ if self.is_decoder:
616
+ outputs = self_attention_outputs[1:-1]
617
+ present_key_value = self_attention_outputs[-1]
618
+ else:
619
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
620
+
621
+ cross_attn_present_key_value = None
622
+ if self.is_decoder and encoder_hidden_states is not None:
623
+ if not hasattr(self, "crossattention"):
624
+ raise AttributeError(
625
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
626
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
627
+ )
628
+
629
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
630
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
631
+ cross_attention_outputs = self.crossattention(
632
+ attention_output,
633
+ attention_mask,
634
+ head_mask,
635
+ encoder_hidden_states,
636
+ encoder_attention_mask,
637
+ cross_attn_past_key_value,
638
+ output_attentions,
639
+ training=training,
640
+ )
641
+ attention_output = cross_attention_outputs[0]
642
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
643
+
644
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
645
+ cross_attn_present_key_value = cross_attention_outputs[-1]
646
+ present_key_value = present_key_value + cross_attn_present_key_value
647
+
648
+ layernorm_output = self.LayerNorm(attention_output)
649
+ intermediate_output = self.intermediate(hidden_states=layernorm_output)
650
+ layer_output = self.output_layer(
651
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
652
+ )
653
+ outputs = (layer_output,) + outputs # add attentions if we output them
654
+
655
+ # if decoder, return the attn key/values as the last output
656
+ if self.is_decoder:
657
+ outputs = outputs + (present_key_value,)
658
+
659
+ return outputs
660
+
661
+ def build(self, input_shape=None):
662
+ if self.built:
663
+ return
664
+ self.built = True
665
+ if getattr(self, "attention", None) is not None:
666
+ with tf.name_scope(self.attention.name):
667
+ self.attention.build(None)
668
+ if getattr(self, "intermediate", None) is not None:
669
+ with tf.name_scope(self.intermediate.name):
670
+ self.intermediate.build(None)
671
+ if getattr(self, "output_layer", None) is not None:
672
+ with tf.name_scope(self.output_layer.name):
673
+ self.output_layer.build(None)
674
+ if getattr(self, "LayerNorm", None) is not None:
675
+ with tf.name_scope(self.LayerNorm.name):
676
+ self.LayerNorm.build([None, None, self.config.hidden_size])
677
+
678
+
679
+ class TFEsmEncoder(keras.layers.Layer):
680
+ def __init__(self, config, name=None):
681
+ super().__init__(name=name)
682
+ self.config = config
683
+ self.layer = [TFEsmLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
684
+ self.emb_layer_norm_after = keras.layers.LayerNormalization(
685
+ epsilon=config.layer_norm_eps, name="emb_layer_norm_after"
686
+ )
687
+
688
+ def call(
689
+ self,
690
+ hidden_states,
691
+ attention_mask=None,
692
+ head_mask=None,
693
+ encoder_hidden_states=None,
694
+ encoder_attention_mask=None,
695
+ past_key_values=None,
696
+ use_cache=None,
697
+ output_attentions=False,
698
+ output_hidden_states=False,
699
+ return_dict=True,
700
+ training=False,
701
+ ):
702
+ all_hidden_states = () if output_hidden_states else None
703
+ all_self_attentions = () if output_attentions else None
704
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
705
+
706
+ next_decoder_cache = () if use_cache else None
707
+ for i, layer_module in enumerate(self.layer):
708
+ if output_hidden_states:
709
+ all_hidden_states = all_hidden_states + (hidden_states,)
710
+
711
+ layer_head_mask = head_mask[i] if head_mask is not None else None
712
+ past_key_value = past_key_values[i] if past_key_values is not None else None
713
+
714
+ layer_outputs = layer_module(
715
+ hidden_states,
716
+ attention_mask,
717
+ layer_head_mask,
718
+ encoder_hidden_states,
719
+ encoder_attention_mask,
720
+ past_key_value,
721
+ output_attentions,
722
+ training,
723
+ )
724
+
725
+ hidden_states = layer_outputs[0]
726
+ if use_cache:
727
+ next_decoder_cache += (layer_outputs[-1],)
728
+ if output_attentions:
729
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
730
+ if self.config.add_cross_attention:
731
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
732
+
733
+ if self.emb_layer_norm_after:
734
+ hidden_states = self.emb_layer_norm_after(hidden_states)
735
+
736
+ if output_hidden_states:
737
+ all_hidden_states = all_hidden_states + (hidden_states,)
738
+
739
+ if not return_dict:
740
+ return tuple(
741
+ v
742
+ for v in [
743
+ hidden_states,
744
+ next_decoder_cache,
745
+ all_hidden_states,
746
+ all_self_attentions,
747
+ all_cross_attentions,
748
+ ]
749
+ if v is not None
750
+ )
751
+ return TFBaseModelOutputWithPastAndCrossAttentions(
752
+ last_hidden_state=hidden_states,
753
+ past_key_values=next_decoder_cache,
754
+ hidden_states=all_hidden_states,
755
+ attentions=all_self_attentions,
756
+ cross_attentions=all_cross_attentions,
757
+ )
758
+
759
+ def build(self, input_shape=None):
760
+ if self.built:
761
+ return
762
+ self.built = True
763
+ if getattr(self, "emb_layer_norm_after", None) is not None:
764
+ with tf.name_scope(self.emb_layer_norm_after.name):
765
+ self.emb_layer_norm_after.build([None, None, self.config.hidden_size])
766
+ if getattr(self, "layer", None) is not None:
767
+ for layer in self.layer:
768
+ with tf.name_scope(layer.name):
769
+ layer.build(None)
770
+
771
+
772
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Esm
773
+ class TFEsmPooler(keras.layers.Layer):
774
+ def __init__(self, config: EsmConfig, **kwargs):
775
+ super().__init__(**kwargs)
776
+
777
+ self.dense = keras.layers.Dense(
778
+ units=config.hidden_size,
779
+ kernel_initializer=get_initializer(config.initializer_range),
780
+ activation="tanh",
781
+ name="dense",
782
+ )
783
+ self.config = config
784
+
785
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
786
+ # We "pool" the model by simply taking the hidden state corresponding
787
+ # to the first token.
788
+ first_token_tensor = hidden_states[:, 0]
789
+ pooled_output = self.dense(inputs=first_token_tensor)
790
+
791
+ return pooled_output
792
+
793
+ def build(self, input_shape=None):
794
+ if self.built:
795
+ return
796
+ self.built = True
797
+ if getattr(self, "dense", None) is not None:
798
+ with tf.name_scope(self.dense.name):
799
+ self.dense.build([None, None, self.config.hidden_size])
800
+
801
+
802
+ class TFEsmPreTrainedModel(TFPreTrainedModel):
803
+ """
804
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
805
+ models.
806
+ """
807
+
808
+ config_class = EsmConfig
809
+ base_model_prefix = "esm"
810
+
811
+
812
+ ESM_START_DOCSTRING = r"""
813
+
814
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
815
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
816
+ etc.)
817
+
818
+ This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a
819
+ regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior.
820
+
821
+ Parameters:
822
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
823
+ model. Initializing with a config file does not load the weights associated with the model, only the
824
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
825
+ """
826
+
827
+ ESM_INPUTS_DOCSTRING = r"""
828
+ Args:
829
+ input_ids (`tf.Tensor` of shape `({0})`):
830
+ Indices of input sequence tokens in the vocabulary.
831
+
832
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
833
+ [`PreTrainedTokenizer.__call__`] for details.
834
+
835
+ [What are input IDs?](../glossary#input-ids)
836
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
837
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
838
+
839
+ - 1 for tokens that are **not masked**,
840
+ - 0 for tokens that are **masked**.
841
+
842
+ [What are attention masks?](../glossary#attention-mask)
843
+ position_ids (`tf.Tensor` of shape `({0})`, *optional*):
844
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
845
+ config.max_position_embeddings - 1]`.
846
+
847
+ [What are position IDs?](../glossary#position-ids)
848
+ head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
849
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
850
+
851
+ - 1 indicates the head is **not masked**,
852
+ - 0 indicates the head is **masked**.
853
+
854
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
855
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
856
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
857
+ model's internal embedding lookup matrix.
858
+ output_attentions (`bool`, *optional*):
859
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
860
+ tensors for more detail.
861
+ output_hidden_states (`bool`, *optional*):
862
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
863
+ more detail.
864
+ return_dict (`bool`, *optional*):
865
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
866
+ """
867
+
868
+
869
+ @add_start_docstrings(
870
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
871
+ ESM_START_DOCSTRING,
872
+ )
873
+ class TFEsmMainLayer(keras.layers.Layer):
874
+ """
875
+
876
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
877
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
878
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
879
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
880
+
881
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
882
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
883
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
884
+ """
885
+
886
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
887
+
888
+ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs):
889
+ super().__init__(name=name, **kwargs)
890
+
891
+ self.config = config
892
+ self.is_decoder = config.is_decoder
893
+
894
+ self.embeddings = TFEsmEmbeddings(config, name="embeddings")
895
+ self.encoder = TFEsmEncoder(config, name="encoder")
896
+ self.pooler = TFEsmPooler(config, name="pooler") if add_pooling_layer else None
897
+
898
+ self.contact_head = TFEsmContactPredictionHead(
899
+ in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name="contact_head"
900
+ )
901
+
902
+ def build(self, input_shape=None):
903
+ if self.built:
904
+ return
905
+ self.built = True
906
+ if getattr(self, "embeddings", None) is not None:
907
+ with tf.name_scope(self.embeddings.name):
908
+ self.embeddings.build(None)
909
+ if getattr(self, "encoder", None) is not None:
910
+ with tf.name_scope(self.encoder.name):
911
+ self.encoder.build(None)
912
+ if getattr(self, "pooler", None) is not None:
913
+ with tf.name_scope(self.pooler.name):
914
+ self.pooler.build(None)
915
+ if getattr(self, "contact_head", None) is not None:
916
+ with tf.name_scope(self.contact_head.name):
917
+ self.contact_head.build(None)
918
+
919
+ def get_input_embeddings(self):
920
+ return self.embeddings.word_embeddings
921
+
922
+ def set_input_embeddings(self, value: tf.Variable):
923
+ self.embeddings.word_embeddings.weight = value
924
+ self.embeddings.vocab_size = shape_list(value)[0]
925
+
926
+ def _prune_heads(self, heads_to_prune):
927
+ raise NotImplementedError
928
+
929
+ def call(
930
+ self,
931
+ input_ids: TFModelInputType | None = None,
932
+ attention_mask: np.ndarray | tf.Tensor | None = None,
933
+ position_ids: np.ndarray | tf.Tensor | None = None,
934
+ head_mask: np.ndarray | tf.Tensor | None = None,
935
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
936
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
937
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
938
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
939
+ use_cache: Optional[bool] = None,
940
+ output_attentions: Optional[bool] = None,
941
+ output_hidden_states: Optional[bool] = None,
942
+ return_dict: Optional[bool] = None,
943
+ training: bool = False,
944
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
945
+ if not self.config.is_decoder:
946
+ use_cache = False
947
+
948
+ if input_ids is not None and inputs_embeds is not None:
949
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
950
+ elif input_ids is not None:
951
+ input_shape = shape_list(input_ids)
952
+ elif inputs_embeds is not None:
953
+ input_shape = shape_list(inputs_embeds)[:-1]
954
+ else:
955
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
956
+
957
+ batch_size, seq_length = input_shape
958
+
959
+ if past_key_values is None:
960
+ past_key_values_length = 0
961
+ past_key_values = [None] * len(self.encoder.layer)
962
+ else:
963
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
964
+
965
+ if attention_mask is None:
966
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
967
+
968
+ embedding_output = self.embeddings(
969
+ input_ids=input_ids,
970
+ attention_mask=attention_mask,
971
+ position_ids=position_ids,
972
+ inputs_embeds=inputs_embeds,
973
+ past_key_values_length=past_key_values_length,
974
+ training=training,
975
+ )
976
+
977
+ # We create a 3D attention mask from a 2D tensor mask.
978
+ # Sizes are [batch_size, 1, 1, to_seq_length]
979
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
980
+ # this attention mask is more simple than the triangular masking of causal attention
981
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
982
+ attention_mask_shape = shape_list(attention_mask)
983
+
984
+ mask_seq_length = seq_length + past_key_values_length
985
+ # Copied from `modeling_tf_t5.py`
986
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
987
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
988
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
989
+ if self.is_decoder:
990
+ seq_ids = tf.range(mask_seq_length)
991
+ causal_mask = tf.less_equal(
992
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
993
+ seq_ids[None, :, None],
994
+ )
995
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
996
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
997
+ attention_mask_shape = shape_list(extended_attention_mask)
998
+ extended_attention_mask = tf.reshape(
999
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
1000
+ )
1001
+ if past_key_values[0] is not None:
1002
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
1003
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
1004
+ else:
1005
+ extended_attention_mask = tf.reshape(
1006
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
1007
+ )
1008
+
1009
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1010
+ # masked positions, this operation will create a tensor which is 0.0 for
1011
+ # positions we want to attend and -10000.0 for masked positions.
1012
+ # Since we are adding it to the raw scores before the softmax, this is
1013
+ # effectively the same as removing these entirely.
1014
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
1015
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
1016
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
1017
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
1018
+
1019
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
1020
+ if self.is_decoder and encoder_attention_mask is not None:
1021
+ # If a 2D or 3D attention mask is provided for the cross-attention
1022
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
1024
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
1025
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
1026
+ if num_dims_encoder_attention_mask == 3:
1027
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
1028
+ if num_dims_encoder_attention_mask == 2:
1029
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
1030
+
1031
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
1032
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
1033
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
1034
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
1035
+
1036
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
1037
+ else:
1038
+ encoder_extended_attention_mask = None
1039
+
1040
+ # Prepare head mask if needed
1041
+ # 1.0 in head_mask indicate we keep the head
1042
+ # attention_probs has shape bsz x n_heads x N x N
1043
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1044
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1045
+ if head_mask is not None:
1046
+ raise NotImplementedError
1047
+ else:
1048
+ head_mask = [None] * self.config.num_hidden_layers
1049
+
1050
+ encoder_outputs = self.encoder(
1051
+ hidden_states=embedding_output,
1052
+ attention_mask=extended_attention_mask,
1053
+ head_mask=head_mask,
1054
+ encoder_hidden_states=encoder_hidden_states,
1055
+ encoder_attention_mask=encoder_extended_attention_mask,
1056
+ past_key_values=past_key_values,
1057
+ use_cache=use_cache,
1058
+ output_attentions=output_attentions,
1059
+ output_hidden_states=output_hidden_states,
1060
+ return_dict=return_dict,
1061
+ training=training,
1062
+ )
1063
+
1064
+ sequence_output = encoder_outputs[0]
1065
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
1066
+
1067
+ if not return_dict:
1068
+ return (
1069
+ sequence_output,
1070
+ pooled_output,
1071
+ ) + encoder_outputs[1:]
1072
+
1073
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
1074
+ last_hidden_state=sequence_output,
1075
+ pooler_output=pooled_output,
1076
+ past_key_values=encoder_outputs.past_key_values,
1077
+ hidden_states=encoder_outputs.hidden_states,
1078
+ attentions=encoder_outputs.attentions,
1079
+ cross_attentions=encoder_outputs.cross_attentions,
1080
+ )
1081
+
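The extended attention mask built in `call` above turns a 0/1 padding mask into an additive bias: positions to attend contribute 0.0 to the pre-softmax scores, while padded positions contribute -10000.0, which effectively removes them. A minimal standalone sketch of the same arithmetic (illustrative only, not part of the library source):

import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 0.0]])  # 1 = attend, 0 = padding
extended = attention_mask[:, None, None, :]      # broadcastable to [batch, heads, from_len, to_len]
additive_bias = (1.0 - extended) * -10000.0      # 0.0 for real tokens, -10000.0 for padding
print(additive_bias.numpy())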
1082
+ def predict_contacts(self, tokens, attention_mask):
1083
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
1084
+ attns = tf.stack(attns, axis=1) # Matches the original model layout
1085
+ # In the original model, attentions for padding tokens are completely zeroed out.
1086
+ # This makes no difference most of the time because the other tokens won't attend to them,
1087
+ # but it does for the contact prediction task, which takes attentions as input,
1088
+ # so we have to mimic that here.
1089
+ attention_mask = tf.cast(attention_mask, attns.dtype)
1090
+ attns *= attention_mask[:, None, None, None]
1091
+ attns *= attention_mask[:, None, None, :, None]
1092
+ return self.contact_head(tokens, attns)
1093
+
1094
+
1095
+ @add_start_docstrings(
1096
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
1097
+ ESM_START_DOCSTRING,
1098
+ )
1099
+ class TFEsmModel(TFEsmPreTrainedModel):
1100
+ def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs):
1101
+ super().__init__(config, *inputs, **kwargs)
1102
+
1103
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name="esm")
1104
+
1105
+ @unpack_inputs
1106
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1107
+ @add_code_sample_docstrings(
1108
+ checkpoint=_CHECKPOINT_FOR_DOC,
1109
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1110
+ config_class=_CONFIG_FOR_DOC,
1111
+ )
1112
+ def call(
1113
+ self,
1114
+ input_ids: TFModelInputType | None = None,
1115
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1116
+ position_ids: np.ndarray | tf.Tensor | None = None,
1117
+ head_mask: np.ndarray | tf.Tensor | None = None,
1118
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1119
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1120
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1121
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1122
+ use_cache: Optional[bool] = None,
1123
+ output_attentions: Optional[bool] = None,
1124
+ output_hidden_states: Optional[bool] = None,
1125
+ return_dict: Optional[bool] = None,
1126
+ training: Optional[bool] = False,
1127
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1128
+ r"""
1129
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1130
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1131
+ the model is configured as a decoder.
1132
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1133
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1134
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1135
+
1136
+ - 1 for tokens that are **not masked**,
1137
+ - 0 for tokens that are **masked**.
1138
+
1139
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
1140
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1141
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1142
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1143
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1144
+ use_cache (`bool`, *optional*, defaults to `True`):
1145
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1146
+ `past_key_values`). Set to `False` during training and `True` during generation.
1147
+ """
1148
+ outputs = self.esm(
1149
+ input_ids=input_ids,
1150
+ attention_mask=attention_mask,
1151
+ position_ids=position_ids,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=encoder_attention_mask,
1156
+ past_key_values=past_key_values,
1157
+ use_cache=use_cache,
1158
+ output_attentions=output_attentions,
1159
+ output_hidden_states=output_hidden_states,
1160
+ return_dict=return_dict,
1161
+ training=training,
1162
+ )
1163
+ return outputs
1164
+
1165
+ def predict_contacts(self, tokens, attention_mask):
1166
+ return self.esm.predict_contacts(tokens, attention_mask)
1167
+
1168
+ def build(self, input_shape=None):
1169
+ if self.built:
1170
+ return
1171
+ self.built = True
1172
+ if getattr(self, "esm", None) is not None:
1173
+ with tf.name_scope(self.esm.name):
1174
+ self.esm.build(None)
1175
+
1176
+
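A hedged usage sketch for the bare `TFEsmModel` defined above; the checkpoint name is an assumption used only for illustration and is not referenced in this file (loading may need `from_pt=True` if only PyTorch weights are published for the chosen checkpoint):

from transformers import AutoTokenizer, TFEsmModel

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")  # assumed checkpoint
model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
inputs = tokenizer("MKTAYIAKQR", return_tensors="tf")  # protein sequence -> input_ids, attention_mask
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)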
1177
+ @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
1178
+ class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss):
1179
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1180
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1181
+
1182
+ def __init__(self, config):
1183
+ super().__init__(config)
1184
+
1185
+ if config.is_decoder:
1186
+ logger.warning(
1187
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
1188
+ "bi-directional self-attention."
1189
+ )
1190
+
1191
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1192
+ self.lm_head = TFEsmLMHead(config, name="lm_head")
1193
+ if config.tie_word_embeddings:
1194
+ # Ensure word embeddings are built so that we actually have something to tie
1195
+ with tf.name_scope(os.path.join(self._name_scope(), "esm", "embeddings", "word_embeddings")):
1196
+ self.esm.embeddings.word_embeddings.build((None, None))
1197
+ self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0]
1198
+
1199
+ def get_output_embeddings(self):
1200
+ return self.lm_head.decoder
1201
+
1202
+ def set_output_embeddings(self, new_embeddings):
1203
+ self.lm_head.decoder = new_embeddings
1204
+
1205
+ def get_lm_head(self):
1206
+ return self.lm_head
1207
+
1208
+ @unpack_inputs
1209
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1210
+ @add_code_sample_docstrings(
1211
+ checkpoint=_CHECKPOINT_FOR_DOC,
1212
+ output_type=TFMaskedLMOutput,
1213
+ config_class=_CONFIG_FOR_DOC,
1214
+ mask="<mask>",
1215
+ )
1216
+ def call(
1217
+ self,
1218
+ input_ids: TFModelInputType | None = None,
1219
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1220
+ position_ids: np.ndarray | tf.Tensor | None = None,
1221
+ head_mask: np.ndarray | tf.Tensor | None = None,
1222
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1223
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1224
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1225
+ labels: np.ndarray | tf.Tensor | None = None,
1226
+ output_attentions: Optional[bool] = None,
1227
+ output_hidden_states: Optional[bool] = None,
1228
+ return_dict: Optional[bool] = None,
1229
+ training: bool = False,
1230
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1231
+ r"""
1232
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1233
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1234
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1235
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1236
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1237
+ Used to hide legacy arguments that have been deprecated.
1238
+ """
1239
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1240
+
1241
+ outputs = self.esm(
1242
+ input_ids,
1243
+ attention_mask=attention_mask,
1244
+ position_ids=position_ids,
1245
+ head_mask=head_mask,
1246
+ inputs_embeds=inputs_embeds,
1247
+ encoder_hidden_states=encoder_hidden_states,
1248
+ encoder_attention_mask=encoder_attention_mask,
1249
+ output_attentions=output_attentions,
1250
+ output_hidden_states=output_hidden_states,
1251
+ return_dict=return_dict,
1252
+ training=training,
1253
+ )
1254
+ sequence_output = outputs[0]
1255
+ prediction_scores = self.lm_head(sequence_output)
1256
+
1257
+ masked_lm_loss = None
1258
+ if labels is not None:
1259
+ masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores)
1260
+
1261
+ if not return_dict:
1262
+ output = (prediction_scores,) + outputs[2:]
1263
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1264
+
1265
+ return TFMaskedLMOutput(
1266
+ loss=masked_lm_loss,
1267
+ logits=prediction_scores,
1268
+ hidden_states=outputs.hidden_states,
1269
+ attentions=outputs.attentions,
1270
+ )
1271
+
1272
+ def predict_contacts(self, tokens, attention_mask):
1273
+ return self.esm.predict_contacts(tokens, attention_mask)
1274
+
1275
+ def build(self, input_shape=None):
1276
+ if self.built:
1277
+ return
1278
+ self.built = True
1279
+ if getattr(self, "esm", None) is not None:
1280
+ with tf.name_scope(self.esm.name):
1281
+ self.esm.build(None)
1282
+ if getattr(self, "lm_head", None) is not None:
1283
+ with tf.name_scope(self.lm_head.name):
1284
+ self.lm_head.build(None)
1285
+
1286
+
1287
+ class TFEsmLMHead(keras.layers.Layer):
1288
+ """ESM Head for masked language modeling."""
1289
+
1290
+ def __init__(self, config, name=None):
1291
+ super().__init__(name=name)
1292
+ self.dense = keras.layers.Dense(
1293
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1294
+ )
1295
+
1296
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1297
+ if config.tie_word_embeddings:
1298
+ self.decoder = None
1299
+ else:
1300
+ self.decoder = keras.layers.Dense(
1301
+ config.vocab_size,
1302
+ kernel_initializer=get_initializer(config.initializer_range),
1303
+ name="decoder",
1304
+ use_bias=False,
1305
+ )
1306
+ self.config = config
1307
+
1308
+ def build(self, input_shape=None):
1309
+ # Separate bias to match the PT model and allow weight cross-loading to work
1310
+ # Put it in the build so it gets the right name when adding it as a weight
1311
+ if self.built:
1312
+ return
1313
+ self.built = True
1314
+ self.bias = self.add_weight("bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True)
1315
+ if getattr(self, "dense", None) is not None:
1316
+ with tf.name_scope(self.dense.name):
1317
+ self.dense.build([None, None, self.config.hidden_size])
1318
+ if getattr(self, "layer_norm", None) is not None:
1319
+ with tf.name_scope(self.layer_norm.name):
1320
+ self.layer_norm.build([None, None, self.config.hidden_size])
1321
+ if getattr(self, "decoder", None) is not None and not self.config.tie_word_embeddings:
1322
+ with tf.name_scope(self.decoder.name):
1323
+ self.decoder.build([None, None, self.config.hidden_size])
1324
+
1325
+ def get_bias(self):
1326
+ return {"bias": self.bias}
1327
+
1328
+ def call(self, features):
1329
+ x = self.dense(features)
1330
+ x = tf.nn.gelu(x)
1331
+ x = self.layer_norm(x)
1332
+
1333
+ # project back to size of vocabulary with bias
1334
+ if self.config.tie_word_embeddings:
1335
+ x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias
1336
+ else:
1337
+ x = self.decoder(x) + self.bias
1338
+ return x
1339
+
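When `config.tie_word_embeddings` is set, the head above projects hidden states back to vocabulary size by multiplying with the transposed word-embedding matrix and adding the separate `bias` weight created in `build`. A toy sketch of that projection with made-up shapes (illustrative only):

import tensorflow as tf

hidden = tf.random.normal((1, 4, 8))          # [batch, seq_len, hidden_size]
embedding_matrix = tf.random.normal((10, 8))  # [vocab_size, hidden_size], shared with the input embeddings
bias = tf.zeros((10,))                        # analogous to the separate bias weight
logits = tf.matmul(hidden, embedding_matrix, transpose_b=True) + bias  # [1, 4, 10]
print(logits.shape)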
1340
+
1341
+ @add_start_docstrings(
1342
+ """
1343
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1344
+ output) e.g. for GLUE tasks.
1345
+ """,
1346
+ ESM_START_DOCSTRING,
1347
+ )
1348
+ class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss):
1349
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1350
+
1351
+ def __init__(self, config):
1352
+ super().__init__(config)
1353
+ self.num_labels = config.num_labels
1354
+ self.config = config
1355
+
1356
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1357
+ self.classifier = TFEsmClassificationHead(config, name="classifier")
1358
+
1359
+ @unpack_inputs
1360
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1361
+ @add_code_sample_docstrings(
1362
+ checkpoint=_CHECKPOINT_FOR_DOC,
1363
+ output_type=TFSequenceClassifierOutput,
1364
+ config_class=_CONFIG_FOR_DOC,
1365
+ )
1366
+ def call(
1367
+ self,
1368
+ input_ids: TFModelInputType | None = None,
1369
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1370
+ position_ids: np.ndarray | tf.Tensor | None = None,
1371
+ head_mask: np.ndarray | tf.Tensor | None = None,
1372
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1373
+ labels: np.ndarray | tf.Tensor | None = None,
1374
+ output_attentions: Optional[bool] = None,
1375
+ output_hidden_states: Optional[bool] = None,
1376
+ return_dict: Optional[bool] = None,
1377
+ training: bool = False,
1378
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1379
+ r"""
1380
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1381
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1382
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1383
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1384
+ """
1385
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1386
+
1387
+ outputs = self.esm(
1388
+ input_ids,
1389
+ attention_mask=attention_mask,
1390
+ position_ids=position_ids,
1391
+ head_mask=head_mask,
1392
+ inputs_embeds=inputs_embeds,
1393
+ output_attentions=output_attentions,
1394
+ output_hidden_states=output_hidden_states,
1395
+ return_dict=return_dict,
1396
+ training=training,
1397
+ )
1398
+ sequence_output = outputs[0]
1399
+ logits = self.classifier(sequence_output)
1400
+
1401
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1402
+
1403
+ if not return_dict:
1404
+ output = (logits,) + outputs[2:]
1405
+ return ((loss,) + output) if loss is not None else output
1406
+
1407
+ return TFSequenceClassifierOutput(
1408
+ loss=loss,
1409
+ logits=logits,
1410
+ hidden_states=outputs.hidden_states,
1411
+ attentions=outputs.attentions,
1412
+ )
1413
+
1414
+ def build(self, input_shape=None):
1415
+ if self.built:
1416
+ return
1417
+ self.built = True
1418
+ if getattr(self, "esm", None) is not None:
1419
+ with tf.name_scope(self.esm.name):
1420
+ self.esm.build(None)
1421
+ if getattr(self, "classifier", None) is not None:
1422
+ with tf.name_scope(self.classifier.name):
1423
+ self.classifier.build(None)
1424
+
1425
+
1426
+ @add_start_docstrings(
1427
+ """
1428
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1429
+ Named-Entity-Recognition (NER) tasks.
1430
+ """,
1431
+ ESM_START_DOCSTRING,
1432
+ )
1433
+ class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss):
1434
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1435
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1436
+
1437
+ def __init__(self, config):
1438
+ super().__init__(config)
1439
+ self.num_labels = config.num_labels
1440
+
1441
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1442
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1443
+ self.classifier = keras.layers.Dense(config.num_labels, name="classifier")
1444
+ self.config = config
1445
+
1446
+ @unpack_inputs
1447
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1448
+ @add_code_sample_docstrings(
1449
+ checkpoint=_CHECKPOINT_FOR_DOC,
1450
+ output_type=TFTokenClassifierOutput,
1451
+ config_class=_CONFIG_FOR_DOC,
1452
+ )
1453
+ def call(
1454
+ self,
1455
+ input_ids: TFModelInputType | None = None,
1456
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1457
+ position_ids: np.ndarray | tf.Tensor | None = None,
1458
+ head_mask: np.ndarray | tf.Tensor | None = None,
1459
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1460
+ labels: np.ndarray | tf.Tensor | None = None,
1461
+ output_attentions: Optional[bool] = None,
1462
+ output_hidden_states: Optional[bool] = None,
1463
+ return_dict: Optional[bool] = None,
1464
+ training: bool = False,
1465
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1466
+ r"""
1467
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1468
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1469
+ """
1470
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1471
+
1472
+ outputs = self.esm(
1473
+ input_ids,
1474
+ attention_mask=attention_mask,
1475
+ position_ids=position_ids,
1476
+ head_mask=head_mask,
1477
+ inputs_embeds=inputs_embeds,
1478
+ output_attentions=output_attentions,
1479
+ output_hidden_states=output_hidden_states,
1480
+ return_dict=return_dict,
1481
+ training=training,
1482
+ )
1483
+
1484
+ sequence_output = outputs[0]
1485
+
1486
+ sequence_output = self.dropout(sequence_output, training=training)
1487
+ logits = self.classifier(sequence_output)
1488
+
1489
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1490
+
1491
+ if not return_dict:
1492
+ output = (logits,) + outputs[2:]
1493
+ return ((loss,) + output) if loss is not None else output
1494
+
1495
+ return TFTokenClassifierOutput(
1496
+ loss=loss,
1497
+ logits=logits,
1498
+ hidden_states=outputs.hidden_states,
1499
+ attentions=outputs.attentions,
1500
+ )
1501
+
1502
+ def build(self, input_shape=None):
1503
+ if self.built:
1504
+ return
1505
+ self.built = True
1506
+ if getattr(self, "esm", None) is not None:
1507
+ with tf.name_scope(self.esm.name):
1508
+ self.esm.build(None)
1509
+ if getattr(self, "classifier", None) is not None:
1510
+ with tf.name_scope(self.classifier.name):
1511
+ self.classifier.build([None, None, self.config.hidden_size])
1512
+
1513
+
1514
+ class TFEsmClassificationHead(keras.layers.Layer):
1515
+ """Head for sentence-level classification tasks."""
1516
+
1517
+ def __init__(self, config, name=None):
1518
+ super().__init__(name=name)
1519
+ self.dense = keras.layers.Dense(
1520
+ config.hidden_size,
1521
+ kernel_initializer=get_initializer(config.initializer_range),
1522
+ activation="tanh",
1523
+ name="dense",
1524
+ )
1525
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1526
+ self.out_proj = keras.layers.Dense(
1527
+ config.num_labels,
1528
+ kernel_initializer=get_initializer(config.initializer_range),
1529
+ activation="linear",
1530
+ name="out_proj",
1531
+ )
1532
+ self.config = config
1533
+
1534
+ def call(self, features, training=False):
1535
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1536
+ x = self.dropout(x, training=training)
1537
+ x = self.dense(x)
1538
+ x = self.dropout(x, training=training)
1539
+ x = self.out_proj(x)
1540
+ return x
1541
+
1542
+ def build(self, input_shape=None):
1543
+ if self.built:
1544
+ return
1545
+ self.built = True
1546
+ if getattr(self, "dense", None) is not None:
1547
+ with tf.name_scope(self.dense.name):
1548
+ self.dense.build([None, None, self.config.hidden_size])
1549
+ if getattr(self, "out_proj", None) is not None:
1550
+ with tf.name_scope(self.out_proj.name):
1551
+ self.out_proj.build([None, None, self.config.hidden_size])
1552
+
1553
+
1554
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1555
+ """
1556
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1557
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1558
+
1559
+ Args:
1560
+ input_ids: tf.Tensor, padding_idx: int, past_key_values_length: int
1561
+
1562
+ Returns: tf.Tensor
1563
+ """
1564
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1565
+ mask = tf.cast(input_ids != padding_idx, tf.int64)
1566
+ incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
1567
+ return incremental_indices + padding_idx
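A small worked example of `create_position_ids_from_input_ids` (illustrative; the padding index value of 1 is an assumption for the toy input):

import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import create_position_ids_from_input_ids

input_ids = tf.constant([[5, 6, 7, 1, 1]])  # toy ids with padding_idx = 1
print(create_position_ids_from_input_ids(input_ids, padding_idx=1).numpy())
# [[2 3 4 1 1]] -> real tokens count up from padding_idx + 1, padding positions stay at padding_idx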
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py ADDED
@@ -0,0 +1,255 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Dict, Tuple, overload
17
+
18
+ import torch
19
+ import torch.types
20
+ from torch import nn
21
+
22
+ from . import residue_constants as rc
23
+ from .rigid_utils import Rigid, Rotation
24
+ from .tensor_utils import batched_gather
25
+
26
+
27
+ @overload
28
+ def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor:
29
+ ...
30
+
31
+
32
+ @overload
33
+ def pseudo_beta_fn(
34
+ aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor
35
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
36
+ ...
37
+
38
+
39
+ def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
40
+ is_gly = aatype == rc.restype_order["G"]
41
+ ca_idx = rc.atom_order["CA"]
42
+ cb_idx = rc.atom_order["CB"]
43
+ pseudo_beta = torch.where(
44
+ is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
45
+ all_atom_positions[..., ca_idx, :],
46
+ all_atom_positions[..., cb_idx, :],
47
+ )
48
+
49
+ if all_atom_masks is not None:
50
+ pseudo_beta_mask = torch.where(
51
+ is_gly,
52
+ all_atom_masks[..., ca_idx],
53
+ all_atom_masks[..., cb_idx],
54
+ )
55
+ return pseudo_beta, pseudo_beta_mask
56
+ else:
57
+ return pseudo_beta
58
+
59
+
60
+ def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
61
+ atom37_data = batched_gather(
62
+ atom14,
63
+ batch["residx_atom37_to_atom14"],
64
+ dim=-2,
65
+ no_batch_dims=len(atom14.shape[:-2]),
66
+ )
67
+
68
+ atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
69
+
70
+ return atom37_data
71
+
72
+
73
+ def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor:
74
+ template_aatype = template_feats["template_aatype"]
75
+ torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
76
+ alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"]
77
+ torsion_angles_mask = template_feats["template_torsion_angles_mask"]
78
+ template_angle_feat = torch.cat(
79
+ [
80
+ nn.functional.one_hot(template_aatype, 22),
81
+ torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14),
82
+ alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14),
83
+ torsion_angles_mask,
84
+ ],
85
+ dim=-1,
86
+ )
87
+
88
+ return template_angle_feat
89
+
90
+
91
+ def build_template_pair_feat(
92
+ batch: Dict[str, torch.Tensor],
93
+ min_bin: torch.types.Number,
94
+ max_bin: torch.types.Number,
95
+ no_bins: int,
96
+ use_unit_vector: bool = False,
97
+ eps: float = 1e-20,
98
+ inf: float = 1e8,
99
+ ) -> torch.Tensor:
100
+ template_mask = batch["template_pseudo_beta_mask"]
101
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
102
+
103
+ # Compute distogram (this seems to differ slightly from Alg. 5)
104
+ tpb = batch["template_pseudo_beta"]
105
+ dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True)
106
+ lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
107
+ upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
108
+ dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
109
+
110
+ to_concat = [dgram, template_mask_2d[..., None]]
111
+
112
+ aatype_one_hot: torch.LongTensor = nn.functional.one_hot(
113
+ batch["template_aatype"],
114
+ rc.restype_num + 2,
115
+ )
116
+
117
+ n_res = batch["template_aatype"].shape[-1]
118
+ to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1))
119
+ to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1))
120
+
121
+ n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
122
+ rigids = Rigid.make_transform_from_reference(
123
+ n_xyz=batch["template_all_atom_positions"][..., n, :],
124
+ ca_xyz=batch["template_all_atom_positions"][..., ca, :],
125
+ c_xyz=batch["template_all_atom_positions"][..., c, :],
126
+ eps=eps,
127
+ )
128
+ points = rigids.get_trans()[..., None, :, :]
129
+ rigid_vec = rigids[..., None].invert_apply(points)
130
+
131
+ inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1))
132
+
133
+ t_aa_masks = batch["template_all_atom_mask"]
134
+ template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
135
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
136
+
137
+ inv_distance_scalar = inv_distance_scalar * template_mask_2d
138
+ unit_vector = rigid_vec * inv_distance_scalar[..., None]
139
+
140
+ if not use_unit_vector:
141
+ unit_vector = unit_vector * 0.0
142
+
143
+ to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
144
+ to_concat.append(template_mask_2d[..., None])
145
+
146
+ act = torch.cat(to_concat, dim=-1)
147
+ act = act * template_mask_2d[..., None]
148
+
149
+ return act
150
+
151
+
152
+ def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor:
153
+ msa_1hot: torch.LongTensor = nn.functional.one_hot(batch["extra_msa"], 23)
154
+ msa_feat = [
155
+ msa_1hot,
156
+ batch["extra_has_deletion"].unsqueeze(-1),
157
+ batch["extra_deletion_value"].unsqueeze(-1),
158
+ ]
159
+ return torch.cat(msa_feat, dim=-1)
160
+
161
+
162
+ def torsion_angles_to_frames(
163
+ r: Rigid,
164
+ alpha: torch.Tensor,
165
+ aatype: torch.Tensor,
166
+ rrgdf: torch.Tensor,
167
+ ) -> Rigid:
168
+ # [*, N, 8, 4, 4]
169
+ default_4x4 = rrgdf[aatype, ...]
170
+
171
+ # [*, N, 8] transformations, i.e.
172
+ # One [*, N, 8, 3, 3] rotation matrix and
173
+ # One [*, N, 8, 3] translation matrix
174
+ default_r = r.from_tensor_4x4(default_4x4)
175
+
176
+ bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
177
+ bb_rot[..., 1] = 1
178
+
179
+ # [*, N, 8, 2]
180
+ alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2)
181
+
182
+ # [*, N, 8, 3, 3]
183
+ # Produces rotation matrices of the form:
184
+ # [
185
+ # [1, 0 , 0 ],
186
+ # [0, a_2,-a_1],
187
+ # [0, a_1, a_2]
188
+ # ]
189
+ # This follows the original code rather than the supplement, which uses
190
+ # different indices.
191
+
192
+ all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
193
+ all_rots[..., 0, 0] = 1
194
+ all_rots[..., 1, 1] = alpha[..., 1]
195
+ all_rots[..., 1, 2] = -alpha[..., 0]
196
+ all_rots[..., 2, 1:] = alpha
197
+
198
+ all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None))
199
+
200
+ chi2_frame_to_frame = all_frames[..., 5]
201
+ chi3_frame_to_frame = all_frames[..., 6]
202
+ chi4_frame_to_frame = all_frames[..., 7]
203
+
204
+ chi1_frame_to_bb = all_frames[..., 4]
205
+ chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)
206
+ chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)
207
+ chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)
208
+
209
+ all_frames_to_bb = Rigid.cat(
210
+ [
211
+ all_frames[..., :5],
212
+ chi2_frame_to_bb.unsqueeze(-1),
213
+ chi3_frame_to_bb.unsqueeze(-1),
214
+ chi4_frame_to_bb.unsqueeze(-1),
215
+ ],
216
+ dim=-1,
217
+ )
218
+
219
+ all_frames_to_global = r[..., None].compose(all_frames_to_bb)
220
+
221
+ return all_frames_to_global
222
+
223
+
224
+ def frames_and_literature_positions_to_atom14_pos(
225
+ r: Rigid,
226
+ aatype: torch.Tensor,
227
+ default_frames: torch.Tensor,
228
+ group_idx: torch.Tensor,
229
+ atom_mask: torch.Tensor,
230
+ lit_positions: torch.Tensor,
231
+ ) -> torch.Tensor:
232
+ # [*, N, 14]
233
+ group_mask = group_idx[aatype, ...]
234
+
235
+ # [*, N, 14, 8]
236
+ group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
237
+ group_mask,
238
+ num_classes=default_frames.shape[-3],
239
+ )
240
+
241
+ # [*, N, 14, 8]
242
+ t_atoms_to_global = r[..., None, :] * group_mask_one_hot
243
+
244
+ # [*, N, 14]
245
+ t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1))
246
+
247
+ # [*, N, 14, 1]
248
+ atom_mask = atom_mask[aatype, ...].unsqueeze(-1)
249
+
250
+ # [*, N, 14, 3]
251
+ lit_positions = lit_positions[aatype, ...]
252
+ pred_positions = t_atoms_to_global.apply(lit_positions)
253
+ pred_positions = pred_positions * atom_mask
254
+
255
+ return pred_positions
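A hedged sketch of the first helper in this file, `pseudo_beta_fn`, which selects the CA coordinate for glycine and the CB coordinate for every other residue (random coordinates used purely for illustration):

import torch
from transformers.models.esm.openfold_utils import residue_constants as rc
from transformers.models.esm.openfold_utils.feats import pseudo_beta_fn

aatype = torch.tensor([rc.restype_order["G"], rc.restype_order["A"]])  # [glycine, alanine]
all_atom_positions = torch.randn(2, 37, 3)  # atom37 layout, random coordinates for illustration
pseudo_beta = pseudo_beta_fn(aatype, all_atom_positions, None)
print(pseudo_beta.shape)  # torch.Size([2, 3]): CA picked for Gly, CB for Ala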
llmeval-env/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py ADDED
@@ -0,0 +1,143 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for ESM."""
16
+ import os
17
+ from typing import List, Optional
18
+
19
+ from ...tokenization_utils import PreTrainedTokenizer
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
26
+
27
+
28
+ def load_vocab_file(vocab_file):
29
+ with open(vocab_file, "r") as f:
30
+ lines = f.read().splitlines()
31
+ return [l.strip() for l in lines]
32
+
33
+
34
+ class EsmTokenizer(PreTrainedTokenizer):
35
+ """
36
+ Constructs an ESM tokenizer.
37
+ """
38
+
39
+ vocab_files_names = VOCAB_FILES_NAMES
40
+ model_input_names = ["input_ids", "attention_mask"]
41
+
42
+ def __init__(
43
+ self,
44
+ vocab_file,
45
+ unk_token="<unk>",
46
+ cls_token="<cls>",
47
+ pad_token="<pad>",
48
+ mask_token="<mask>",
49
+ eos_token="<eos>",
50
+ **kwargs,
51
+ ):
52
+ self.all_tokens = load_vocab_file(vocab_file)
53
+ self._id_to_token = dict(enumerate(self.all_tokens))
54
+ self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
55
+ super().__init__(
56
+ unk_token=unk_token,
57
+ cls_token=cls_token,
58
+ pad_token=pad_token,
59
+ mask_token=mask_token,
60
+ eos_token=eos_token,
61
+ **kwargs,
62
+ )
63
+
64
+ # TODO, all the tokens are added? But they are also part of the vocab... bit strange.
65
+ # none of them are special, but they all need special splitting.
66
+
67
+ self.unique_no_split_tokens = self.all_tokens
68
+ self._update_trie(self.unique_no_split_tokens)
69
+
70
+ def _convert_id_to_token(self, index: int) -> str:
71
+ return self._id_to_token.get(index, self.unk_token)
72
+
73
+ def _convert_token_to_id(self, token: str) -> int:
74
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
75
+
76
+ def _tokenize(self, text, **kwargs):
77
+ return text.split()
78
+
79
+ def get_vocab(self):
80
+ base_vocab = self._token_to_id.copy()
81
+ base_vocab.update(self.added_tokens_encoder)
82
+ return base_vocab
83
+
84
+ def token_to_id(self, token: str) -> int:
85
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
86
+
87
+ def id_to_token(self, index: int) -> str:
88
+ return self._id_to_token.get(index, self.unk_token)
89
+
90
+ def build_inputs_with_special_tokens(
91
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
92
+ ) -> List[int]:
93
+ cls = [self.cls_token_id]
94
+ sep = [self.eos_token_id] # No sep token in ESM vocabulary
95
+ if token_ids_1 is None:
96
+ if self.eos_token_id is None:
97
+ return cls + token_ids_0
98
+ else:
99
+ return cls + token_ids_0 + sep
100
+ elif self.eos_token_id is None:
101
+ raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
102
+ return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
103
+
104
+ def get_special_tokens_mask(
105
+ self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
106
+ ) -> List[int]:
107
+ """
108
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
109
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
110
+
111
+ Args:
112
+ token_ids_0 (`List[int]`):
113
+ List of ids of the first sequence.
114
+ token_ids_1 (`List[int]`, *optional*):
115
+ List of ids of the second sequence.
116
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
117
+ Whether or not the token list is already formatted with special tokens for the model.
118
+
119
+ Returns:
120
+ A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
121
+ """
122
+ if already_has_special_tokens:
123
+ if token_ids_1 is not None:
124
+ raise ValueError(
125
+ "You should not supply a second sequence if the provided sequence of "
126
+ "ids is already formatted with special tokens for the model."
127
+ )
128
+
129
+ return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
130
+ mask = [1] + ([0] * len(token_ids_0)) + [1]
131
+ if token_ids_1 is not None:
132
+ mask += [0] * len(token_ids_1) + [1]
133
+ return mask
134
+
135
+ def save_vocabulary(self, save_directory, filename_prefix):
136
+ vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
137
+ with open(vocab_file, "w") as f:
138
+ f.write("\n".join(self.all_tokens))
139
+ return (vocab_file,)
140
+
141
+ @property
142
+ def vocab_size(self) -> int:
143
+ return len(self.all_tokens)
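A minimal sketch of `EsmTokenizer` with a toy vocabulary written to a temporary file (real ESM checkpoints ship their own vocab.txt; the token list below is an assumption for illustration):

import os, tempfile
from transformers import EsmTokenizer

tokens = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "<mask>"]
with tempfile.TemporaryDirectory() as tmp:
    vocab_path = os.path.join(tmp, "vocab.txt")
    with open(vocab_path, "w") as f:
        f.write("\n".join(tokens))
    tokenizer = EsmTokenizer(vocab_file=vocab_path)
    print(tokenizer("L A G")["input_ids"])  # [0, 4, 5, 6, 2] -> <cls> L A G <eos>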
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__init__.py ADDED
@@ -0,0 +1,104 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_tokenizers_available,
21
+ is_torch_available,
22
+ is_vision_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
28
+ "processing_layoutlmv2": ["LayoutLMv2Processor"],
29
+ "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
30
+ }
31
+
32
+ try:
33
+ if not is_tokenizers_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
39
+
40
+ try:
41
+ if not is_vision_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
47
+ _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
48
+
49
+ try:
50
+ if not is_torch_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ _import_structure["modeling_layoutlmv2"] = [
56
+ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
57
+ "LayoutLMv2ForQuestionAnswering",
58
+ "LayoutLMv2ForSequenceClassification",
59
+ "LayoutLMv2ForTokenClassification",
60
+ "LayoutLMv2Layer",
61
+ "LayoutLMv2Model",
62
+ "LayoutLMv2PreTrainedModel",
63
+ ]
64
+
65
+ if TYPE_CHECKING:
66
+ from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
67
+ from .processing_layoutlmv2 import LayoutLMv2Processor
68
+ from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
69
+
70
+ try:
71
+ if not is_tokenizers_available():
72
+ raise OptionalDependencyNotAvailable()
73
+ except OptionalDependencyNotAvailable:
74
+ pass
75
+ else:
76
+ from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
77
+
78
+ try:
79
+ if not is_vision_available():
80
+ raise OptionalDependencyNotAvailable()
81
+ except OptionalDependencyNotAvailable:
82
+ pass
83
+ else:
84
+ from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
85
+
86
+ try:
87
+ if not is_torch_available():
88
+ raise OptionalDependencyNotAvailable()
89
+ except OptionalDependencyNotAvailable:
90
+ pass
91
+ else:
92
+ from .modeling_layoutlmv2 import (
93
+ LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
94
+ LayoutLMv2ForQuestionAnswering,
95
+ LayoutLMv2ForSequenceClassification,
96
+ LayoutLMv2ForTokenClassification,
97
+ LayoutLMv2Layer,
98
+ LayoutLMv2Model,
99
+ LayoutLMv2PreTrainedModel,
100
+ )
101
+ else:
102
+ import sys
103
+
104
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
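The `_LazyModule` registration above defers the heavy optional imports (tokenizers, vision, torch) until an attribute is actually accessed. A hedged illustration (assumes a working transformers install; instantiating the config itself should not require the optional detectron2 dependency):

import transformers.models.layoutlmv2 as layoutlmv2

config = layoutlmv2.LayoutLMv2Config()  # attribute access triggers the real import of configuration_layoutlmv2
print(config.hidden_size)  # 768 by default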
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.79 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/configuration_layoutlmv2.cpython-310.pyc ADDED
Binary file (9.37 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/feature_extraction_layoutlmv2.cpython-310.pyc ADDED
Binary file (1.06 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/image_processing_layoutlmv2.cpython-310.pyc ADDED
Binary file (11.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/modeling_layoutlmv2.cpython-310.pyc ADDED
Binary file (42.7 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/processing_layoutlmv2.cpython-310.pyc ADDED
Binary file (7.32 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2.cpython-310.pyc ADDED
Binary file (46.9 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/__pycache__/tokenization_layoutlmv2_fast.cpython-310.pyc ADDED
Binary file (21.6 kB).
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/configuration_layoutlmv2.py ADDED
@@ -0,0 +1,222 @@
1
+ # coding=utf-8
2
+ # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LayoutLMv2 model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import is_detectron2_available, logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ # soft dependency
28
+ if is_detectron2_available():
29
+ import detectron2
30
+
31
+
32
+ class LayoutLMv2Config(PretrainedConfig):
33
+ r"""
34
+ This is the configuration class to store the configuration of a [`LayoutLMv2Model`]. It is used to instantiate a
35
+ LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a
36
+ configuration with the defaults will yield a similar configuration to that of the LayoutLMv2
37
+ [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) architecture.
38
+
39
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
40
+ documentation from [`PretrainedConfig`] for more information.
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*, defaults to 30522):
44
+ Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by
45
+ the `input_ids` passed when calling [`LayoutLMv2Model`].
46
+ hidden_size (`int`, *optional*, defaults to 768):
47
+ Dimension of the encoder layers and the pooler layer.
48
+ num_hidden_layers (`int`, *optional*, defaults to 12):
49
+ Number of hidden layers in the Transformer encoder.
50
+ num_attention_heads (`int`, *optional*, defaults to 12):
51
+ Number of attention heads for each attention layer in the Transformer encoder.
52
+ intermediate_size (`int`, *optional*, defaults to 3072):
53
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
54
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
55
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
56
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
57
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ max_position_embeddings (`int`, *optional*, defaults to 512):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ type_vocab_size (`int`, *optional*, defaults to 2):
65
+ The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv2Model`].
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
72
+ The maximum value that the 2D position embedding might ever be used with. Typically set this to something
73
+ large just in case (e.g., 1024).
74
+ max_rel_pos (`int`, *optional*, defaults to 128):
75
+ The maximum number of relative positions to be used in the self-attention mechanism.
76
+ rel_pos_bins (`int`, *optional*, defaults to 32):
77
+ The number of relative position bins to be used in the self-attention mechanism.
78
+ fast_qkv (`bool`, *optional*, defaults to `True`):
79
+ Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.
80
+ max_rel_2d_pos (`int`, *optional*, defaults to 256):
81
+ The maximum number of relative 2D positions in the self-attention mechanism.
82
+ rel_2d_pos_bins (`int`, *optional*, defaults to 64):
83
+ The number of 2D relative position bins in the self-attention mechanism.
84
+ image_feature_pool_shape (`List[int]`, *optional*, defaults to [7, 7, 256]):
85
+ The shape of the average-pooled feature map.
86
+ coordinate_size (`int`, *optional*, defaults to 128):
87
+ Dimension of the coordinate embeddings.
88
+ shape_size (`int`, *optional*, defaults to 128):
89
+ Dimension of the width and height embeddings.
90
+ has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
91
+ Whether or not to use a relative attention bias in the self-attention mechanism.
92
+ has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
93
+ Whether or not to use a spatial attention bias in the self-attention mechanism.
94
+ has_visual_segment_embedding (`bool`, *optional*, defaults to `False`):
95
+ Whether or not to add visual segment embeddings.
96
+ detectron2_config_args (`dict`, *optional*):
97
+ Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this
98
+ file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py)
99
+ for details regarding default values.
100
+
101
+ Example:
102
+
103
+ ```python
104
+ >>> from transformers import LayoutLMv2Config, LayoutLMv2Model
105
+
106
+ >>> # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration
107
+ >>> configuration = LayoutLMv2Config()
108
+
109
+ >>> # Initializing a model (with random weights) from the microsoft/layoutlmv2-base-uncased style configuration
110
+ >>> model = LayoutLMv2Model(configuration)
111
+
112
+ >>> # Accessing the model configuration
113
+ >>> configuration = model.config
114
+ ```"""
115
+
116
+ model_type = "layoutlmv2"
117
+
118
+ def __init__(
119
+ self,
120
+ vocab_size=30522,
121
+ hidden_size=768,
122
+ num_hidden_layers=12,
123
+ num_attention_heads=12,
124
+ intermediate_size=3072,
125
+ hidden_act="gelu",
126
+ hidden_dropout_prob=0.1,
127
+ attention_probs_dropout_prob=0.1,
128
+ max_position_embeddings=512,
129
+ type_vocab_size=2,
130
+ initializer_range=0.02,
131
+ layer_norm_eps=1e-12,
132
+ pad_token_id=0,
133
+ max_2d_position_embeddings=1024,
134
+ max_rel_pos=128,
135
+ rel_pos_bins=32,
136
+ fast_qkv=True,
137
+ max_rel_2d_pos=256,
138
+ rel_2d_pos_bins=64,
139
+ convert_sync_batchnorm=True,
140
+ image_feature_pool_shape=[7, 7, 256],
141
+ coordinate_size=128,
142
+ shape_size=128,
143
+ has_relative_attention_bias=True,
144
+ has_spatial_attention_bias=True,
145
+ has_visual_segment_embedding=False,
146
+ detectron2_config_args=None,
147
+ **kwargs,
148
+ ):
149
+ super().__init__(
150
+ vocab_size=vocab_size,
151
+ hidden_size=hidden_size,
152
+ num_hidden_layers=num_hidden_layers,
153
+ num_attention_heads=num_attention_heads,
154
+ intermediate_size=intermediate_size,
155
+ hidden_act=hidden_act,
156
+ hidden_dropout_prob=hidden_dropout_prob,
157
+ attention_probs_dropout_prob=attention_probs_dropout_prob,
158
+ max_position_embeddings=max_position_embeddings,
159
+ type_vocab_size=type_vocab_size,
160
+ initializer_range=initializer_range,
161
+ layer_norm_eps=layer_norm_eps,
162
+ pad_token_id=pad_token_id,
163
+ **kwargs,
164
+ )
165
+ self.max_2d_position_embeddings = max_2d_position_embeddings
166
+ self.max_rel_pos = max_rel_pos
167
+ self.rel_pos_bins = rel_pos_bins
168
+ self.fast_qkv = fast_qkv
169
+ self.max_rel_2d_pos = max_rel_2d_pos
170
+ self.rel_2d_pos_bins = rel_2d_pos_bins
171
+ self.convert_sync_batchnorm = convert_sync_batchnorm
172
+ self.image_feature_pool_shape = image_feature_pool_shape
173
+ self.coordinate_size = coordinate_size
174
+ self.shape_size = shape_size
175
+ self.has_relative_attention_bias = has_relative_attention_bias
176
+ self.has_spatial_attention_bias = has_spatial_attention_bias
177
+ self.has_visual_segment_embedding = has_visual_segment_embedding
178
+ self.detectron2_config_args = (
179
+ detectron2_config_args if detectron2_config_args is not None else self.get_default_detectron2_config()
180
+ )
181
+
182
+ @classmethod
183
+ def get_default_detectron2_config(self):
184
+ return {
185
+ "MODEL.MASK_ON": True,
186
+ "MODEL.PIXEL_STD": [57.375, 57.120, 58.395],
187
+ "MODEL.BACKBONE.NAME": "build_resnet_fpn_backbone",
188
+ "MODEL.FPN.IN_FEATURES": ["res2", "res3", "res4", "res5"],
189
+ "MODEL.ANCHOR_GENERATOR.SIZES": [[32], [64], [128], [256], [512]],
190
+ "MODEL.RPN.IN_FEATURES": ["p2", "p3", "p4", "p5", "p6"],
191
+ "MODEL.RPN.PRE_NMS_TOPK_TRAIN": 2000,
192
+ "MODEL.RPN.PRE_NMS_TOPK_TEST": 1000,
193
+ "MODEL.RPN.POST_NMS_TOPK_TRAIN": 1000,
194
+ "MODEL.POST_NMS_TOPK_TEST": 1000,
195
+ "MODEL.ROI_HEADS.NAME": "StandardROIHeads",
196
+ "MODEL.ROI_HEADS.NUM_CLASSES": 5,
197
+ "MODEL.ROI_HEADS.IN_FEATURES": ["p2", "p3", "p4", "p5"],
198
+ "MODEL.ROI_BOX_HEAD.NAME": "FastRCNNConvFCHead",
199
+ "MODEL.ROI_BOX_HEAD.NUM_FC": 2,
200
+ "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION": 14,
201
+ "MODEL.ROI_MASK_HEAD.NAME": "MaskRCNNConvUpsampleHead",
202
+ "MODEL.ROI_MASK_HEAD.NUM_CONV": 4,
203
+ "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION": 7,
204
+ "MODEL.RESNETS.DEPTH": 101,
205
+ "MODEL.RESNETS.SIZES": [[32], [64], [128], [256], [512]],
206
+ "MODEL.RESNETS.ASPECT_RATIOS": [[0.5, 1.0, 2.0]],
207
+ "MODEL.RESNETS.OUT_FEATURES": ["res2", "res3", "res4", "res5"],
208
+ "MODEL.RESNETS.NUM_GROUPS": 32,
209
+ "MODEL.RESNETS.WIDTH_PER_GROUP": 8,
210
+ "MODEL.RESNETS.STRIDE_IN_1X1": False,
211
+ }
212
+
213
+ def get_detectron2_config(self):
214
+ detectron2_config = detectron2.config.get_cfg()
215
+ for k, v in self.detectron2_config_args.items():
216
+ attributes = k.split(".")
217
+ to_set = detectron2_config
218
+ for attribute in attributes[:-1]:
219
+ to_set = getattr(to_set, attribute)
220
+ setattr(to_set, attributes[-1], v)
221
+
222
+ return detectron2_config
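A minimal sketch of how `detectron2_config_args` flows into `get_detectron2_config()`, assuming Detectron2 is installed for the final (commented) lines; the depth override is purely illustrative:

```python
from transformers import LayoutLMv2Config

# Start from the packaged defaults and override one dotted key; any key not listed here
# keeps whatever value detectron2's base config provides.
detectron2_args = LayoutLMv2Config.get_default_detectron2_config()
detectron2_args["MODEL.RESNETS.DEPTH"] = 50  # hypothetical override, for illustration only

config = LayoutLMv2Config(detectron2_config_args=detectron2_args)

# get_detectron2_config() walks each "A.B.C" key with getattr/setattr on the CfgNode:
# cfg = config.get_detectron2_config()
# assert cfg.MODEL.RESNETS.DEPTH == 50
```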
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py ADDED
@@ -0,0 +1,35 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for LayoutLMv2.
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...utils import logging
22
+ from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
29
+ def __init__(self, *args, **kwargs) -> None:
30
+ warnings.warn(
31
+ "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
32
+ " Please use LayoutLMv2ImageProcessor instead.",
33
+ FutureWarning,
34
+ )
35
+ super().__init__(*args, **kwargs)
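The shim above keeps the old class importable while steering users to the image processor. A short sketch of the intended migration, assuming Pillow is available (the warning check only demonstrates the `FutureWarning` being raised):

```python
import warnings

from transformers import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

# Legacy path: still functional, but warns on construction.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
assert any(issubclass(w.category, FutureWarning) for w in caught)

# Preferred path: construct the image processor directly.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
```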
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/modeling_layoutlmv2.py ADDED
@@ -0,0 +1,1407 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 Microsoft Research The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch LayoutLMv2 model."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ BaseModelOutputWithPooling,
29
+ QuestionAnsweringModelOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward
35
+ from ...utils import (
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ is_detectron2_available,
39
+ logging,
40
+ replace_return_docstrings,
41
+ requires_backends,
42
+ )
43
+ from .configuration_layoutlmv2 import LayoutLMv2Config
44
+
45
+
46
+ # soft dependency
47
+ if is_detectron2_available():
48
+ import detectron2
49
+ from detectron2.modeling import META_ARCH_REGISTRY
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ _CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased"
54
+ _CONFIG_FOR_DOC = "LayoutLMv2Config"
55
+
56
+
57
+ from ..deprecated._archive_maps import LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
58
+
59
+
60
+ class LayoutLMv2Embeddings(nn.Module):
61
+ """Construct the embeddings from word, position and token_type embeddings."""
62
+
63
+ def __init__(self, config):
64
+ super(LayoutLMv2Embeddings, self).__init__()
65
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
66
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
67
+
68
+ self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
69
+ self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
70
+ self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
71
+ self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
72
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
73
+
74
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
75
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
76
+
77
+ self.register_buffer(
78
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
79
+ )
80
+
81
+ def _calc_spatial_position_embeddings(self, bbox):
82
+ try:
83
+ left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
84
+ upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
85
+ right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
86
+ lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
87
+ except IndexError as e:
88
+ raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
89
+
90
+ h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
91
+ w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
92
+
93
+ spatial_position_embeddings = torch.cat(
94
+ [
95
+ left_position_embeddings,
96
+ upper_position_embeddings,
97
+ right_position_embeddings,
98
+ lower_position_embeddings,
99
+ h_position_embeddings,
100
+ w_position_embeddings,
101
+ ],
102
+ dim=-1,
103
+ )
104
+ return spatial_position_embeddings
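+ # Dimension note (a sketch using the default config): the concatenation above stacks four
+ # coordinate embeddings of size `coordinate_size` and two shape embeddings of size `shape_size`,
+ # i.e. 4 * 128 + 2 * 128 = 768 with the defaults, matching `hidden_size` so the result can be
+ # summed with the word/position/token-type embeddings in `_calc_text_embeddings`.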
105
+
106
+
107
+ class LayoutLMv2SelfAttention(nn.Module):
108
+ def __init__(self, config):
109
+ super().__init__()
110
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
111
+ raise ValueError(
112
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
113
+ f"heads ({config.num_attention_heads})"
114
+ )
115
+ self.fast_qkv = config.fast_qkv
116
+ self.num_attention_heads = config.num_attention_heads
117
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
118
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
119
+
120
+ self.has_relative_attention_bias = config.has_relative_attention_bias
121
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
122
+
123
+ if config.fast_qkv:
124
+ self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
125
+ self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
126
+ self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
127
+ else:
128
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
129
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
130
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
131
+
132
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
133
+
134
+ def transpose_for_scores(self, x):
135
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
136
+ x = x.view(*new_x_shape)
137
+ return x.permute(0, 2, 1, 3)
138
+
139
+ def compute_qkv(self, hidden_states):
140
+ if self.fast_qkv:
141
+ qkv = self.qkv_linear(hidden_states)
142
+ q, k, v = torch.chunk(qkv, 3, dim=-1)
143
+ if q.ndimension() == self.q_bias.ndimension():
144
+ q = q + self.q_bias
145
+ v = v + self.v_bias
146
+ else:
147
+ _sz = (1,) * (q.ndimension() - 1) + (-1,)
148
+ q = q + self.q_bias.view(*_sz)
149
+ v = v + self.v_bias.view(*_sz)
150
+ else:
151
+ q = self.query(hidden_states)
152
+ k = self.key(hidden_states)
153
+ v = self.value(hidden_states)
154
+ return q, k, v
155
+
156
+ def forward(
157
+ self,
158
+ hidden_states,
159
+ attention_mask=None,
160
+ head_mask=None,
161
+ output_attentions=False,
162
+ rel_pos=None,
163
+ rel_2d_pos=None,
164
+ ):
165
+ q, k, v = self.compute_qkv(hidden_states)
166
+
167
+ # (B, L, H*D) -> (B, H, L, D)
168
+ query_layer = self.transpose_for_scores(q)
169
+ key_layer = self.transpose_for_scores(k)
170
+ value_layer = self.transpose_for_scores(v)
171
+
172
+ query_layer = query_layer / math.sqrt(self.attention_head_size)
173
+ # [BSZ, NAT, L, L]
174
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
175
+ if self.has_relative_attention_bias:
176
+ attention_scores += rel_pos
177
+ if self.has_spatial_attention_bias:
178
+ attention_scores += rel_2d_pos
179
+ attention_scores = attention_scores.float().masked_fill_(
180
+ attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min
181
+ )
182
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
183
+ # This is actually dropping out entire tokens to attend to, which might
184
+ # seem a bit unusual, but is taken from the original Transformer paper.
185
+ attention_probs = self.dropout(attention_probs)
186
+
187
+ # Mask heads if we want to
188
+ if head_mask is not None:
189
+ attention_probs = attention_probs * head_mask
190
+
191
+ context_layer = torch.matmul(attention_probs, value_layer)
192
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
193
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
194
+ context_layer = context_layer.view(*new_context_layer_shape)
195
+
196
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
197
+ return outputs
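+ # Masking note (editorial): the attention_mask received here is the already-"extended" mask built
+ # in LayoutLMv2Model.forward, i.e. (1.0 - mask) * finfo.min, so padded positions hold a large
+ # negative value (truthy under .to(torch.bool)) and kept positions hold 0.0 (falsy); masked_fill_
+ # therefore pushes padded keys to the dtype minimum before the softmax.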
198
+
199
+
200
+ class LayoutLMv2Attention(nn.Module):
201
+ def __init__(self, config):
202
+ super().__init__()
203
+ self.self = LayoutLMv2SelfAttention(config)
204
+ self.output = LayoutLMv2SelfOutput(config)
205
+
206
+ def forward(
207
+ self,
208
+ hidden_states,
209
+ attention_mask=None,
210
+ head_mask=None,
211
+ output_attentions=False,
212
+ rel_pos=None,
213
+ rel_2d_pos=None,
214
+ ):
215
+ self_outputs = self.self(
216
+ hidden_states,
217
+ attention_mask,
218
+ head_mask,
219
+ output_attentions,
220
+ rel_pos=rel_pos,
221
+ rel_2d_pos=rel_2d_pos,
222
+ )
223
+ attention_output = self.output(self_outputs[0], hidden_states)
224
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
225
+ return outputs
226
+
227
+
228
+ class LayoutLMv2SelfOutput(nn.Module):
229
+ def __init__(self, config):
230
+ super().__init__()
231
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
232
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
233
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
234
+
235
+ def forward(self, hidden_states, input_tensor):
236
+ hidden_states = self.dense(hidden_states)
237
+ hidden_states = self.dropout(hidden_states)
238
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
239
+ return hidden_states
240
+
241
+
242
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->LayoutLMv2
243
+ class LayoutLMv2Intermediate(nn.Module):
244
+ def __init__(self, config):
245
+ super().__init__()
246
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
247
+ if isinstance(config.hidden_act, str):
248
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
249
+ else:
250
+ self.intermediate_act_fn = config.hidden_act
251
+
252
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
253
+ hidden_states = self.dense(hidden_states)
254
+ hidden_states = self.intermediate_act_fn(hidden_states)
255
+ return hidden_states
256
+
257
+
258
+ # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLMv2
259
+ class LayoutLMv2Output(nn.Module):
260
+ def __init__(self, config):
261
+ super().__init__()
262
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
263
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
264
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
265
+
266
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
267
+ hidden_states = self.dense(hidden_states)
268
+ hidden_states = self.dropout(hidden_states)
269
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
270
+ return hidden_states
271
+
272
+
273
+ class LayoutLMv2Layer(nn.Module):
274
+ def __init__(self, config):
275
+ super().__init__()
276
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
277
+ self.seq_len_dim = 1
278
+ self.attention = LayoutLMv2Attention(config)
279
+ self.intermediate = LayoutLMv2Intermediate(config)
280
+ self.output = LayoutLMv2Output(config)
281
+
282
+ def forward(
283
+ self,
284
+ hidden_states,
285
+ attention_mask=None,
286
+ head_mask=None,
287
+ output_attentions=False,
288
+ rel_pos=None,
289
+ rel_2d_pos=None,
290
+ ):
291
+ self_attention_outputs = self.attention(
292
+ hidden_states,
293
+ attention_mask,
294
+ head_mask,
295
+ output_attentions=output_attentions,
296
+ rel_pos=rel_pos,
297
+ rel_2d_pos=rel_2d_pos,
298
+ )
299
+ attention_output = self_attention_outputs[0]
300
+
301
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
302
+
303
+ layer_output = apply_chunking_to_forward(
304
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
305
+ )
306
+ outputs = (layer_output,) + outputs
307
+
308
+ return outputs
309
+
310
+ def feed_forward_chunk(self, attention_output):
311
+ intermediate_output = self.intermediate(attention_output)
312
+ layer_output = self.output(intermediate_output, attention_output)
313
+ return layer_output
314
+
315
+
316
+ def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
317
+ """
318
+ Adapted from Mesh Tensorflow:
319
+ https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
320
+ Translate relative position to a bucket number for relative attention. The relative position is defined as
321
+ memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
322
+ position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small
323
+ absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions
324
+ >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should
325
+ allow for more graceful generalization to longer sequences than the model has been trained on.
326
+
327
+ Args:
328
+ relative_position: an int32 Tensor
329
+ bidirectional: a boolean - whether the attention is bidirectional
330
+ num_buckets: an integer
331
+ max_distance: an integer
332
+
333
+ Returns:
334
+ a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
335
+ """
336
+
337
+ ret = 0
338
+ if bidirectional:
339
+ num_buckets //= 2
340
+ ret += (relative_position > 0).long() * num_buckets
341
+ n = torch.abs(relative_position)
342
+ else:
343
+ n = torch.max(-relative_position, torch.zeros_like(relative_position))
344
+ # now n is in the range [0, inf)
345
+
346
+ # half of the buckets are for exact increments in positions
347
+ max_exact = num_buckets // 2
348
+ is_small = n < max_exact
349
+
350
+ # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
351
+ val_if_large = max_exact + (
352
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
353
+ ).to(torch.long)
354
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
355
+
356
+ ret += torch.where(is_small, n, val_if_large)
357
+ return ret
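+ # Worked example (editorial sketch): with bidirectional=True, num_buckets=32 and max_distance=128,
+ # each sign of the relative position gets its own half of 16 buckets; offsets 0..7 map to exact
+ # buckets, offsets 8..127 are spread logarithmically over the remaining 8 buckets, and anything
+ # beyond max_distance is clamped into the last bucket. E.g. relative_position = +3 -> 16 + 3 = 19.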
358
+
359
+
360
+ class LayoutLMv2Encoder(nn.Module):
361
+ def __init__(self, config):
362
+ super().__init__()
363
+ self.config = config
364
+ self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
365
+
366
+ self.has_relative_attention_bias = config.has_relative_attention_bias
367
+ self.has_spatial_attention_bias = config.has_spatial_attention_bias
368
+
369
+ if self.has_relative_attention_bias:
370
+ self.rel_pos_bins = config.rel_pos_bins
371
+ self.max_rel_pos = config.max_rel_pos
372
+ self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)
373
+
374
+ if self.has_spatial_attention_bias:
375
+ self.max_rel_2d_pos = config.max_rel_2d_pos
376
+ self.rel_2d_pos_bins = config.rel_2d_pos_bins
377
+ self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
378
+ self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
379
+
380
+ self.gradient_checkpointing = False
381
+
382
+ def _calculate_1d_position_embeddings(self, position_ids):
383
+ rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
384
+ rel_pos = relative_position_bucket(
385
+ rel_pos_mat,
386
+ num_buckets=self.rel_pos_bins,
387
+ max_distance=self.max_rel_pos,
388
+ )
389
+ rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
390
+ rel_pos = rel_pos.contiguous()
391
+ return rel_pos
392
+
393
+ def _calculate_2d_position_embeddings(self, bbox):
394
+ position_coord_x = bbox[:, :, 0]
395
+ position_coord_y = bbox[:, :, 3]
396
+ rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
397
+ rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
398
+ rel_pos_x = relative_position_bucket(
399
+ rel_pos_x_2d_mat,
400
+ num_buckets=self.rel_2d_pos_bins,
401
+ max_distance=self.max_rel_2d_pos,
402
+ )
403
+ rel_pos_y = relative_position_bucket(
404
+ rel_pos_y_2d_mat,
405
+ num_buckets=self.rel_2d_pos_bins,
406
+ max_distance=self.max_rel_2d_pos,
407
+ )
408
+ rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2)
409
+ rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2)
410
+ rel_pos_x = rel_pos_x.contiguous()
411
+ rel_pos_y = rel_pos_y.contiguous()
412
+ rel_2d_pos = rel_pos_x + rel_pos_y
413
+ return rel_2d_pos
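+ # Indexing note (editorial): `self.rel_pos_x_bias.weight.t()[rel_pos_x]` is equivalent to feeding a
+ # one-hot encoding of the bucket ids through the bias-free Linear layer, i.e. it looks up one learned
+ # scalar per attention head for every (query, key) pair; the permute to (batch, heads, seq, seq)
+ # lets the result be added directly onto the attention scores in LayoutLMv2SelfAttention.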
414
+
415
+ def forward(
416
+ self,
417
+ hidden_states,
418
+ attention_mask=None,
419
+ head_mask=None,
420
+ output_attentions=False,
421
+ output_hidden_states=False,
422
+ return_dict=True,
423
+ bbox=None,
424
+ position_ids=None,
425
+ ):
426
+ all_hidden_states = () if output_hidden_states else None
427
+ all_self_attentions = () if output_attentions else None
428
+
429
+ rel_pos = self._calculate_1d_position_embeddings(position_ids) if self.has_relative_attention_bias else None
430
+ rel_2d_pos = self._calculate_2d_position_embeddings(bbox) if self.has_spatial_attention_bias else None
431
+
432
+ for i, layer_module in enumerate(self.layer):
433
+ if output_hidden_states:
434
+ all_hidden_states = all_hidden_states + (hidden_states,)
435
+
436
+ layer_head_mask = head_mask[i] if head_mask is not None else None
437
+
438
+ if self.gradient_checkpointing and self.training:
439
+ layer_outputs = self._gradient_checkpointing_func(
440
+ layer_module.__call__,
441
+ hidden_states,
442
+ attention_mask,
443
+ layer_head_mask,
444
+ output_attentions,
445
+ rel_pos=rel_pos,
446
+ rel_2d_pos=rel_2d_pos,
447
+ )
448
+ else:
449
+ layer_outputs = layer_module(
450
+ hidden_states,
451
+ attention_mask,
452
+ layer_head_mask,
453
+ output_attentions,
454
+ rel_pos=rel_pos,
455
+ rel_2d_pos=rel_2d_pos,
456
+ )
457
+
458
+ hidden_states = layer_outputs[0]
459
+ if output_attentions:
460
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
461
+
462
+ if output_hidden_states:
463
+ all_hidden_states = all_hidden_states + (hidden_states,)
464
+
465
+ if not return_dict:
466
+ return tuple(
467
+ v
468
+ for v in [
469
+ hidden_states,
470
+ all_hidden_states,
471
+ all_self_attentions,
472
+ ]
473
+ if v is not None
474
+ )
475
+ return BaseModelOutput(
476
+ last_hidden_state=hidden_states,
477
+ hidden_states=all_hidden_states,
478
+ attentions=all_self_attentions,
479
+ )
480
+
481
+
482
+ class LayoutLMv2PreTrainedModel(PreTrainedModel):
483
+ """
484
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
485
+ models.
486
+ """
487
+
488
+ config_class = LayoutLMv2Config
489
+ base_model_prefix = "layoutlmv2"
490
+
491
+ def _init_weights(self, module):
492
+ """Initialize the weights"""
493
+ if isinstance(module, nn.Linear):
494
+ # Slightly different from the TF version which uses truncated_normal for initialization
495
+ # cf https://github.com/pytorch/pytorch/pull/5617
496
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
497
+ if module.bias is not None:
498
+ module.bias.data.zero_()
499
+ elif isinstance(module, nn.Embedding):
500
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
501
+ if module.padding_idx is not None:
502
+ module.weight.data[module.padding_idx].zero_()
503
+ elif isinstance(module, nn.LayerNorm):
504
+ module.bias.data.zero_()
505
+ module.weight.data.fill_(1.0)
506
+
507
+
508
+ def my_convert_sync_batchnorm(module, process_group=None):
509
+ # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
510
+ if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
511
+ return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
512
+ module_output = module
513
+ if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
514
+ module_output = torch.nn.SyncBatchNorm(
515
+ num_features=module.num_features,
516
+ eps=module.eps,
517
+ affine=True,
518
+ track_running_stats=True,
519
+ process_group=process_group,
520
+ )
521
+ module_output.weight = torch.nn.Parameter(module.weight)
522
+ module_output.bias = torch.nn.Parameter(module.bias)
523
+ module_output.running_mean = module.running_mean
524
+ module_output.running_var = module.running_var
525
+ module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
526
+ for name, child in module.named_children():
527
+ module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
528
+ del module
529
+ return module_output
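+ # Rationale (editorial note): detectron2's FrozenBatchNorm2d is not a subclass of torch's _BatchNorm,
+ # so the stock convert_sync_batchnorm would leave it untouched; this helper rebuilds such layers as
+ # SyncBatchNorm modules, copying the frozen affine parameters and running statistics, so that
+ # synchronize_batch_norm() below can share batch statistics across the processes of a single node.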
530
+
531
+
532
+ class LayoutLMv2VisualBackbone(nn.Module):
533
+ def __init__(self, config):
534
+ super().__init__()
535
+ self.cfg = config.get_detectron2_config()
536
+ meta_arch = self.cfg.MODEL.META_ARCHITECTURE
537
+ model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
538
+ assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
539
+ self.backbone = model.backbone
540
+
541
+ assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
542
+ num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
543
+ self.register_buffer(
544
+ "pixel_mean",
545
+ torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
546
+ persistent=False,
547
+ )
548
+ self.register_buffer(
549
+ "pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1), persistent=False
550
+ )
551
+ self.out_feature_key = "p2"
552
+ if torch.are_deterministic_algorithms_enabled():
553
+ logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
554
+ input_shape = (224, 224)
555
+ backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
556
+ self.pool = nn.AvgPool2d(
557
+ (
558
+ math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
559
+ math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
560
+ )
561
+ )
562
+ else:
563
+ self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
564
+ if len(config.image_feature_pool_shape) == 2:
565
+ config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
566
+ assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
567
+
568
+ def forward(self, images):
569
+ images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
570
+ features = self.backbone(images_input)
571
+ features = features[self.out_feature_key]
572
+ features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
573
+ return features
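+ # Shape sketch (assuming the default 224x224 input and the default Detectron2 FPN backbone): the
+ # "p2" feature map has stride 4, so the backbone output is 256 x 56 x 56; the pooling above reduces
+ # it to image_feature_pool_shape (7 x 7 x 256), and the flatten/transpose yields 49 visual tokens of
+ # dimension 256 per image, which visual_proj then maps to hidden_size.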
574
+
575
+ def synchronize_batch_norm(self):
576
+ if not (
577
+ torch.distributed.is_available()
578
+ and torch.distributed.is_initialized()
579
+ and torch.distributed.get_rank() > -1
580
+ ):
581
+ raise RuntimeError("Make sure torch.distributed is set up properly.")
582
+
583
+ self_rank = torch.distributed.get_rank()
584
+ node_size = torch.cuda.device_count()
585
+ world_size = torch.distributed.get_world_size()
586
+ if not (world_size % node_size == 0):
587
+ raise RuntimeError("Make sure the number of processes can be divided by the number of nodes")
588
+
589
+ node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]
590
+ sync_bn_groups = [
591
+ torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
592
+ ]
593
+ node_rank = self_rank // node_size
594
+
595
+ self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
596
+
597
+
598
+ LAYOUTLMV2_START_DOCSTRING = r"""
599
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
600
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
601
+ behavior.
602
+
603
+ Parameters:
604
+ config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model.
605
+ Initializing with a config file does not load the weights associated with the model, only the
606
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
607
+ """
608
+
609
+ LAYOUTLMV2_INPUTS_DOCSTRING = r"""
610
+ Args:
611
+ input_ids (`torch.LongTensor` of shape `{0}`):
612
+ Indices of input sequence tokens in the vocabulary.
613
+
614
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
615
+ [`PreTrainedTokenizer.__call__`] for details.
616
+
617
+ [What are input IDs?](../glossary#input-ids)
618
+
619
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
620
+ Bounding boxes of each input sequence token. Selected in the range `[0,
621
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
622
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
623
+ y1) represents the position of the lower right corner.
624
+
625
+ image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron2.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
626
+ Batch of document images.
627
+
628
+ attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
629
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
630
+
631
+ - 1 for tokens that are **not masked**,
632
+ - 0 for tokens that are **masked**.
633
+
634
+ [What are attention masks?](../glossary#attention-mask)
635
+ token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
636
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
637
+ 1]`:
638
+
639
+ - 0 corresponds to a *sentence A* token,
640
+ - 1 corresponds to a *sentence B* token.
641
+
642
+ [What are token type IDs?](../glossary#token-type-ids)
643
+ position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
644
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
645
+ config.max_position_embeddings - 1]`.
646
+
647
+ [What are position IDs?](../glossary#position-ids)
648
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
649
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
650
+
651
+ - 1 indicates the head is **not masked**,
652
+ - 0 indicates the head is **masked**.
653
+
654
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
655
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
656
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
657
+ model's internal embedding lookup matrix.
658
+ output_attentions (`bool`, *optional*):
659
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
660
+ tensors for more detail.
661
+ output_hidden_states (`bool`, *optional*):
662
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
663
+ more detail.
664
+ return_dict (`bool`, *optional*):
665
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
666
+ """
667
+
668
+
669
+ class LayoutLMv2Pooler(nn.Module):
670
+ def __init__(self, config):
671
+ super().__init__()
672
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
673
+ self.activation = nn.Tanh()
674
+
675
+ def forward(self, hidden_states):
676
+ # We "pool" the model by simply taking the hidden state corresponding
677
+ # to the first token.
678
+ first_token_tensor = hidden_states[:, 0]
679
+ pooled_output = self.dense(first_token_tensor)
680
+ pooled_output = self.activation(pooled_output)
681
+ return pooled_output
682
+
683
+
684
+ @add_start_docstrings(
685
+ "The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top.",
686
+ LAYOUTLMV2_START_DOCSTRING,
687
+ )
688
+ class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
689
+ def __init__(self, config):
690
+ requires_backends(self, "detectron2")
691
+ super().__init__(config)
692
+ self.config = config
693
+ self.has_visual_segment_embedding = config.has_visual_segment_embedding
694
+ self.embeddings = LayoutLMv2Embeddings(config)
695
+
696
+ self.visual = LayoutLMv2VisualBackbone(config)
697
+ self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
698
+ if self.has_visual_segment_embedding:
699
+ self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
700
+ self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
701
+ self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
702
+
703
+ self.encoder = LayoutLMv2Encoder(config)
704
+ self.pooler = LayoutLMv2Pooler(config)
705
+
706
+ # Initialize weights and apply final processing
707
+ self.post_init()
708
+
709
+ def get_input_embeddings(self):
710
+ return self.embeddings.word_embeddings
711
+
712
+ def set_input_embeddings(self, value):
713
+ self.embeddings.word_embeddings = value
714
+
715
+ def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):
716
+ if input_ids is not None:
717
+ input_shape = input_ids.size()
718
+ else:
719
+ input_shape = inputs_embeds.size()[:-1]
720
+
721
+ seq_length = input_shape[1]
722
+
723
+ if position_ids is None:
724
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
725
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
726
+ if token_type_ids is None:
727
+ token_type_ids = torch.zeros_like(input_ids)
728
+
729
+ if inputs_embeds is None:
730
+ inputs_embeds = self.embeddings.word_embeddings(input_ids)
731
+ position_embeddings = self.embeddings.position_embeddings(position_ids)
732
+ spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
733
+ token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
734
+
735
+ embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings
736
+ embeddings = self.embeddings.LayerNorm(embeddings)
737
+ embeddings = self.embeddings.dropout(embeddings)
738
+ return embeddings
739
+
740
+ def _calc_img_embeddings(self, image, bbox, position_ids):
741
+ visual_embeddings = self.visual_proj(self.visual(image))
742
+ position_embeddings = self.embeddings.position_embeddings(position_ids)
743
+ spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
744
+ embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
745
+ if self.has_visual_segment_embedding:
746
+ embeddings += self.visual_segment_embedding
747
+ embeddings = self.visual_LayerNorm(embeddings)
748
+ embeddings = self.visual_dropout(embeddings)
749
+ return embeddings
750
+
751
+ def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):
752
+ visual_bbox_x = torch.div(
753
+ torch.arange(
754
+ 0,
755
+ 1000 * (image_feature_pool_shape[1] + 1),
756
+ 1000,
757
+ device=device,
758
+ dtype=bbox.dtype,
759
+ ),
760
+ self.config.image_feature_pool_shape[1],
761
+ rounding_mode="floor",
762
+ )
763
+ visual_bbox_y = torch.div(
764
+ torch.arange(
765
+ 0,
766
+ 1000 * (self.config.image_feature_pool_shape[0] + 1),
767
+ 1000,
768
+ device=device,
769
+ dtype=bbox.dtype,
770
+ ),
771
+ self.config.image_feature_pool_shape[0],
772
+ rounding_mode="floor",
773
+ )
774
+ visual_bbox = torch.stack(
775
+ [
776
+ visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
777
+ visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
778
+ visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
779
+ visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
780
+ ],
781
+ dim=-1,
782
+ ).view(-1, bbox.size(-1))
783
+
784
+ visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
785
+
786
+ return visual_bbox
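+ # Worked sketch (with the default 7x7 pool shape): visual_bbox_x / visual_bbox_y are the eight grid
+ # lines 0, 142, 285, ..., 1000 (i.e. 1000 * i // 7), so the stacked boxes tile the normalized
+ # 0-1000 page into 7 * 7 = 49 cells, one bounding box per visual token, matching the 49 pooled
+ # image features produced by LayoutLMv2VisualBackbone.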
787
+
788
+ def _get_input_shape(self, input_ids=None, inputs_embeds=None):
789
+ if input_ids is not None and inputs_embeds is not None:
790
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
791
+ elif input_ids is not None:
792
+ return input_ids.size()
793
+ elif inputs_embeds is not None:
794
+ return inputs_embeds.size()[:-1]
795
+ else:
796
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
797
+
798
+ @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
799
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
800
+ def forward(
801
+ self,
802
+ input_ids: Optional[torch.LongTensor] = None,
803
+ bbox: Optional[torch.LongTensor] = None,
804
+ image: Optional[torch.FloatTensor] = None,
805
+ attention_mask: Optional[torch.FloatTensor] = None,
806
+ token_type_ids: Optional[torch.LongTensor] = None,
807
+ position_ids: Optional[torch.LongTensor] = None,
808
+ head_mask: Optional[torch.FloatTensor] = None,
809
+ inputs_embeds: Optional[torch.FloatTensor] = None,
810
+ output_attentions: Optional[bool] = None,
811
+ output_hidden_states: Optional[bool] = None,
812
+ return_dict: Optional[bool] = None,
813
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
814
+ r"""
815
+ Return:
816
+
817
+ Examples:
818
+
819
+ ```python
820
+ >>> from transformers import AutoProcessor, LayoutLMv2Model, set_seed
821
+ >>> from PIL import Image
822
+ >>> import torch
823
+ >>> from datasets import load_dataset
824
+
825
+ >>> set_seed(88)
826
+
827
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
828
+ >>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
829
+
830
+
831
+ >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
832
+ >>> image_path = dataset["test"][0]["file"]
833
+ >>> image = Image.open(image_path).convert("RGB")
834
+
835
+ >>> encoding = processor(image, return_tensors="pt")
836
+
837
+ >>> outputs = model(**encoding)
838
+ >>> last_hidden_states = outputs.last_hidden_state
839
+
840
+ >>> last_hidden_states.shape
841
+ torch.Size([1, 342, 768])
842
+ ```
843
+ """
844
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
845
+ output_hidden_states = (
846
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
847
+ )
848
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
849
+
850
+ input_shape = self._get_input_shape(input_ids, inputs_embeds)
851
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
852
+
853
+ visual_shape = list(input_shape)
854
+ visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
855
+ visual_shape = torch.Size(visual_shape)
856
+ # needs a new copy of input_shape for tracing. Otherwise wrong dimensions will occur
857
+ final_shape = list(self._get_input_shape(input_ids, inputs_embeds))
858
+ final_shape[1] += visual_shape[1]
859
+ final_shape = torch.Size(final_shape)
860
+
861
+ visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
862
+ final_bbox = torch.cat([bbox, visual_bbox], dim=1)
863
+
864
+ if attention_mask is None:
865
+ attention_mask = torch.ones(input_shape, device=device)
866
+
867
+ visual_attention_mask = torch.ones(visual_shape, device=device)
868
+ final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
869
+
870
+ if token_type_ids is None:
871
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
872
+
873
+ if position_ids is None:
874
+ seq_length = input_shape[1]
875
+ position_ids = self.embeddings.position_ids[:, :seq_length]
876
+ position_ids = position_ids.expand(input_shape)
877
+
878
+ visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
879
+ input_shape[0], 1
880
+ )
881
+ final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
882
+
883
+ if bbox is None:
884
+ bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
885
+
886
+ text_layout_emb = self._calc_text_embeddings(
887
+ input_ids=input_ids,
888
+ bbox=bbox,
889
+ token_type_ids=token_type_ids,
890
+ position_ids=position_ids,
891
+ inputs_embeds=inputs_embeds,
892
+ )
893
+
894
+ visual_emb = self._calc_img_embeddings(
895
+ image=image,
896
+ bbox=visual_bbox,
897
+ position_ids=visual_position_ids,
898
+ )
899
+ final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
900
+
901
+ extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
902
+
903
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
904
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
905
+
906
+ if head_mask is not None:
907
+ if head_mask.dim() == 1:
908
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
909
+ head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
910
+ elif head_mask.dim() == 2:
911
+ head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
912
+ head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
913
+ else:
914
+ head_mask = [None] * self.config.num_hidden_layers
915
+
916
+ encoder_outputs = self.encoder(
917
+ final_emb,
918
+ extended_attention_mask,
919
+ bbox=final_bbox,
920
+ position_ids=final_position_ids,
921
+ head_mask=head_mask,
922
+ output_attentions=output_attentions,
923
+ output_hidden_states=output_hidden_states,
924
+ return_dict=return_dict,
925
+ )
926
+ sequence_output = encoder_outputs[0]
927
+ pooled_output = self.pooler(sequence_output)
928
+
929
+ if not return_dict:
930
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
931
+
932
+ return BaseModelOutputWithPooling(
933
+ last_hidden_state=sequence_output,
934
+ pooler_output=pooled_output,
935
+ hidden_states=encoder_outputs.hidden_states,
936
+ attentions=encoder_outputs.attentions,
937
+ )
938
+
939
+
940
+ @add_start_docstrings(
941
+ """
942
+ LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
943
+ final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
944
+ embeddings, e.g. for document image classification tasks such as the
945
+ [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
946
+ """,
947
+ LAYOUTLMV2_START_DOCSTRING,
948
+ )
949
+ class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):
950
+ def __init__(self, config):
951
+ super().__init__(config)
952
+ self.num_labels = config.num_labels
953
+ self.layoutlmv2 = LayoutLMv2Model(config)
954
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
955
+ self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
956
+
957
+ # Initialize weights and apply final processing
958
+ self.post_init()
959
+
960
+ def get_input_embeddings(self):
961
+ return self.layoutlmv2.embeddings.word_embeddings
962
+
963
+ @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
964
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
965
+ def forward(
966
+ self,
967
+ input_ids: Optional[torch.LongTensor] = None,
968
+ bbox: Optional[torch.LongTensor] = None,
969
+ image: Optional[torch.FloatTensor] = None,
970
+ attention_mask: Optional[torch.FloatTensor] = None,
971
+ token_type_ids: Optional[torch.LongTensor] = None,
972
+ position_ids: Optional[torch.LongTensor] = None,
973
+ head_mask: Optional[torch.FloatTensor] = None,
974
+ inputs_embeds: Optional[torch.FloatTensor] = None,
975
+ labels: Optional[torch.LongTensor] = None,
976
+ output_attentions: Optional[bool] = None,
977
+ output_hidden_states: Optional[bool] = None,
978
+ return_dict: Optional[bool] = None,
979
+ ) -> Union[Tuple, SequenceClassifierOutput]:
980
+ r"""
981
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
982
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
983
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
984
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
985
+
986
+ Returns:
987
+
988
+ Example:
989
+
990
+ ```python
991
+ >>> from transformers import AutoProcessor, LayoutLMv2ForSequenceClassification, set_seed
992
+ >>> from PIL import Image
993
+ >>> import torch
994
+ >>> from datasets import load_dataset
995
+
996
+ >>> set_seed(88)
997
+
998
+ >>> dataset = load_dataset("rvl_cdip", split="train", streaming=True)
999
+ >>> data = next(iter(dataset))
1000
+ >>> image = data["image"].convert("RGB")
1001
+
1002
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
1003
+ >>> model = LayoutLMv2ForSequenceClassification.from_pretrained(
1004
+ ... "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes
1005
+ ... )
1006
+
1007
+ >>> encoding = processor(image, return_tensors="pt")
1008
+ >>> sequence_label = torch.tensor([data["label"]])
1009
+
1010
+ >>> outputs = model(**encoding, labels=sequence_label)
1011
+
1012
+ >>> loss, logits = outputs.loss, outputs.logits
1013
+ >>> predicted_idx = logits.argmax(dim=-1).item()
1014
+ >>> predicted_answer = dataset.info.features["label"].names[predicted_idx]
1015
+ >>> predicted_idx, predicted_answer
1016
+ (4, 'advertisement')
1017
+ ```
1018
+ """
1019
+
1020
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1021
+
1022
+ if input_ids is not None and inputs_embeds is not None:
1023
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
1024
+ elif input_ids is not None:
1025
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
1026
+ input_shape = input_ids.size()
1027
+ elif inputs_embeds is not None:
1028
+ input_shape = inputs_embeds.size()[:-1]
1029
+ else:
1030
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
1031
+
1032
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1033
+
1034
+ visual_shape = list(input_shape)
1035
+ visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
1036
+ visual_shape = torch.Size(visual_shape)
1037
+ final_shape = list(input_shape)
1038
+ final_shape[1] += visual_shape[1]
1039
+ final_shape = torch.Size(final_shape)
1040
+
1041
+ visual_bbox = self.layoutlmv2._calc_visual_bbox(
1042
+ self.config.image_feature_pool_shape, bbox, device, final_shape
1043
+ )
1044
+
1045
+ visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
1046
+ input_shape[0], 1
1047
+ )
1048
+
1049
+ initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(
1050
+ image=image,
1051
+ bbox=visual_bbox,
1052
+ position_ids=visual_position_ids,
1053
+ )
1054
+
1055
+ outputs = self.layoutlmv2(
1056
+ input_ids=input_ids,
1057
+ bbox=bbox,
1058
+ image=image,
1059
+ attention_mask=attention_mask,
1060
+ token_type_ids=token_type_ids,
1061
+ position_ids=position_ids,
1062
+ head_mask=head_mask,
1063
+ inputs_embeds=inputs_embeds,
1064
+ output_attentions=output_attentions,
1065
+ output_hidden_states=output_hidden_states,
1066
+ return_dict=return_dict,
1067
+ )
1068
+ if input_ids is not None:
1069
+ input_shape = input_ids.size()
1070
+ else:
1071
+ input_shape = inputs_embeds.size()[:-1]
1072
+
1073
+ seq_length = input_shape[1]
1074
+ sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
1075
+
1076
+ cls_final_output = sequence_output[:, 0, :]
1077
+
1078
+ # average-pool the visual embeddings
1079
+ pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)
1080
+ pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)
1081
+ # concatenate with cls_final_output
1082
+ sequence_output = torch.cat(
1083
+ [cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1
1084
+ )
1085
+ sequence_output = self.dropout(sequence_output)
1086
+ logits = self.classifier(sequence_output)
1087
+
1088
+ loss = None
1089
+ if labels is not None:
1090
+ if self.config.problem_type is None:
1091
+ if self.num_labels == 1:
1092
+ self.config.problem_type = "regression"
1093
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1094
+ self.config.problem_type = "single_label_classification"
1095
+ else:
1096
+ self.config.problem_type = "multi_label_classification"
1097
+
1098
+ if self.config.problem_type == "regression":
1099
+ loss_fct = MSELoss()
1100
+ if self.num_labels == 1:
1101
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1102
+ else:
1103
+ loss = loss_fct(logits, labels)
1104
+ elif self.config.problem_type == "single_label_classification":
1105
+ loss_fct = CrossEntropyLoss()
1106
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1107
+ elif self.config.problem_type == "multi_label_classification":
1108
+ loss_fct = BCEWithLogitsLoss()
1109
+ loss = loss_fct(logits, labels)
1110
+ if not return_dict:
1111
+ output = (logits,) + outputs[2:]
1112
+ return ((loss,) + output) if loss is not None else output
1113
+
1114
+ return SequenceClassifierOutput(
1115
+ loss=loss,
1116
+ logits=logits,
1117
+ hidden_states=outputs.hidden_states,
1118
+ attentions=outputs.attentions,
1119
+ )
1120
+
1121
+
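As the docstring above notes, the classification head consumes the concatenation of the final `[CLS]` hidden state with the average-pooled initial and final visual embeddings, which is why `self.classifier` takes `hidden_size * 3` input features. A minimal shape-only sketch of that construction (the sizes are assumptions matching the default base config: `hidden_size=768`, a 7x7 visual feature grid, and an illustrative 16 labels):

```python
import torch
from torch import nn

batch, seq_len, vis_len, hidden, num_labels = 2, 128, 49, 768, 16  # 49 = 7 x 7 visual grid

sequence_output = torch.randn(batch, seq_len, hidden)           # text part of the final hidden states
initial_image_embeddings = torch.randn(batch, vis_len, hidden)  # visual embeddings before the encoder
final_image_embeddings = torch.randn(batch, vis_len, hidden)    # visual part of the final hidden states

features = torch.cat(
    [
        sequence_output[:, 0, :],              # final hidden state of the [CLS] token
        initial_image_embeddings.mean(dim=1),  # average-pooled initial visual embeddings
        final_image_embeddings.mean(dim=1),    # average-pooled final visual embeddings
    ],
    dim=1,
)
classifier = nn.Linear(hidden * 3, num_labels)
logits = classifier(features)
print(features.shape, logits.shape)  # torch.Size([2, 2304]) torch.Size([2, 16])
```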
1122
+ @add_start_docstrings(
1123
+ """
1124
+ LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden
1125
+ states) e.g. for sequence labeling (information extraction) tasks such as
1126
+ [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13),
1127
+ [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda).
1128
+ """,
1129
+ LAYOUTLMV2_START_DOCSTRING,
1130
+ )
1131
+ class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
1132
+ def __init__(self, config):
1133
+ super().__init__(config)
1134
+ self.num_labels = config.num_labels
1135
+ self.layoutlmv2 = LayoutLMv2Model(config)
1136
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1137
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1138
+
1139
+ # Initialize weights and apply final processing
1140
+ self.post_init()
1141
+
1142
+ def get_input_embeddings(self):
1143
+ return self.layoutlmv2.embeddings.word_embeddings
1144
+
1145
+ @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1146
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1147
+ def forward(
1148
+ self,
1149
+ input_ids: Optional[torch.LongTensor] = None,
1150
+ bbox: Optional[torch.LongTensor] = None,
1151
+ image: Optional[torch.FloatTensor] = None,
1152
+ attention_mask: Optional[torch.FloatTensor] = None,
1153
+ token_type_ids: Optional[torch.LongTensor] = None,
1154
+ position_ids: Optional[torch.LongTensor] = None,
1155
+ head_mask: Optional[torch.FloatTensor] = None,
1156
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1157
+ labels: Optional[torch.LongTensor] = None,
1158
+ output_attentions: Optional[bool] = None,
1159
+ output_hidden_states: Optional[bool] = None,
1160
+ return_dict: Optional[bool] = None,
1161
+ ) -> Union[Tuple, TokenClassifierOutput]:
1162
+ r"""
1163
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1164
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1165
+
1166
+ Returns:
1167
+
1168
+ Example:
1169
+
1170
+ ```python
1171
+ >>> from transformers import AutoProcessor, LayoutLMv2ForTokenClassification, set_seed
1172
+ >>> from PIL import Image
1173
+ >>> from datasets import load_dataset
1174
+
1175
+ >>> set_seed(88)
1176
+
1177
+ >>> datasets = load_dataset("nielsr/funsd", split="test")
1178
+ >>> labels = datasets.features["ner_tags"].feature.names
1179
+ >>> id2label = {idx: label for idx, label in enumerate(labels)}
1180
+
1181
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
1182
+ >>> model = LayoutLMv2ForTokenClassification.from_pretrained(
1183
+ ... "microsoft/layoutlmv2-base-uncased", num_labels=len(labels)
1184
+ ... )
1185
+
1186
+ >>> data = datasets[0]
1187
+ >>> image = Image.open(data["image_path"]).convert("RGB")
1188
+ >>> words = data["words"]
1189
+ >>> boxes = data["bboxes"] # make sure to normalize your bounding boxes
1190
+ >>> word_labels = data["ner_tags"]
1191
+ >>> encoding = processor(
1192
+ ... image,
1193
+ ... words,
1194
+ ... boxes=boxes,
1195
+ ... word_labels=word_labels,
1196
+ ... padding="max_length",
1197
+ ... truncation=True,
1198
+ ... return_tensors="pt",
1199
+ ... )
1200
+
1201
+ >>> outputs = model(**encoding)
1202
+ >>> logits, loss = outputs.logits, outputs.loss
1203
+
1204
+ >>> predicted_token_class_ids = logits.argmax(-1)
1205
+ >>> predicted_tokens_classes = [id2label[t.item()] for t in predicted_token_class_ids[0]]
1206
+ >>> predicted_tokens_classes[:5]
1207
+ ['B-ANSWER', 'B-HEADER', 'B-HEADER', 'B-HEADER', 'B-HEADER']
1208
+ ```
1209
+ """
1210
+
1211
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1212
+
1213
+ outputs = self.layoutlmv2(
1214
+ input_ids=input_ids,
1215
+ bbox=bbox,
1216
+ image=image,
1217
+ attention_mask=attention_mask,
1218
+ token_type_ids=token_type_ids,
1219
+ position_ids=position_ids,
1220
+ head_mask=head_mask,
1221
+ inputs_embeds=inputs_embeds,
1222
+ output_attentions=output_attentions,
1223
+ output_hidden_states=output_hidden_states,
1224
+ return_dict=return_dict,
1225
+ )
1226
+ if input_ids is not None:
1227
+ input_shape = input_ids.size()
1228
+ else:
1229
+ input_shape = inputs_embeds.size()[:-1]
1230
+
1231
+ seq_length = input_shape[1]
1232
+ # only take the text part of the output representations
1233
+ sequence_output = outputs[0][:, :seq_length]
1234
+ sequence_output = self.dropout(sequence_output)
1235
+ logits = self.classifier(sequence_output)
1236
+
1237
+ loss = None
1238
+ if labels is not None:
1239
+ loss_fct = CrossEntropyLoss()
1240
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1241
+
1242
+ if not return_dict:
1243
+ output = (logits,) + outputs[2:]
1244
+ return ((loss,) + output) if loss is not None else output
1245
+
1246
+ return TokenClassifierOutput(
1247
+ loss=loss,
1248
+ logits=logits,
1249
+ hidden_states=outputs.hidden_states,
1250
+ attentions=outputs.attentions,
1251
+ )
1252
+
1253
+
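The token classification head above and the question answering head below both slice `outputs[0][:, :seq_length]`, because the encoder returns the text tokens first, followed by the flattened visual tokens. A minimal sketch of that split, assuming the default 7x7 image feature pool:

```python
import torch

batch, seq_len, hidden = 2, 128, 768
visual_len = 7 * 7  # image_feature_pool_shape[0] * image_feature_pool_shape[1] in the default config

# The encoder output holds the text tokens first, then the flattened visual tokens.
last_hidden_state = torch.randn(batch, seq_len + visual_len, hidden)

text_part = last_hidden_state[:, :seq_len]    # fed to the token classification / QA heads
visual_part = last_hidden_state[:, seq_len:]  # pooled by the sequence classification head
print(text_part.shape, visual_part.shape)     # torch.Size([2, 128, 768]) torch.Size([2, 49, 768])
```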
1254
+ @add_start_docstrings(
1255
+ """
1256
+ LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as
1257
+ [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
1258
+ compute `span start logits` and `span end logits`).
1259
+ """,
1260
+ LAYOUTLMV2_START_DOCSTRING,
1261
+ )
1262
+ class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):
1263
+ def __init__(self, config, has_visual_segment_embedding=True):
1264
+ super().__init__(config)
1265
+ self.num_labels = config.num_labels
1266
+ config.has_visual_segment_embedding = has_visual_segment_embedding
1267
+ self.layoutlmv2 = LayoutLMv2Model(config)
1268
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1269
+
1270
+ # Initialize weights and apply final processing
1271
+ self.post_init()
1272
+
1273
+ def get_input_embeddings(self):
1274
+ return self.layoutlmv2.embeddings.word_embeddings
1275
+
1276
+ @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1277
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1278
+ def forward(
1279
+ self,
1280
+ input_ids: Optional[torch.LongTensor] = None,
1281
+ bbox: Optional[torch.LongTensor] = None,
1282
+ image: Optional[torch.FloatTensor] = None,
1283
+ attention_mask: Optional[torch.FloatTensor] = None,
1284
+ token_type_ids: Optional[torch.LongTensor] = None,
1285
+ position_ids: Optional[torch.LongTensor] = None,
1286
+ head_mask: Optional[torch.FloatTensor] = None,
1287
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1288
+ start_positions: Optional[torch.LongTensor] = None,
1289
+ end_positions: Optional[torch.LongTensor] = None,
1290
+ output_attentions: Optional[bool] = None,
1291
+ output_hidden_states: Optional[bool] = None,
1292
+ return_dict: Optional[bool] = None,
1293
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1294
+ r"""
1295
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1296
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1297
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1298
+ are not taken into account for computing the loss.
1299
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1300
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1301
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1302
+ are not taken into account for computing the loss.
1303
+
1304
+ Returns:
1305
+
1306
+ Example:
1307
+
1308
+ In the example below, we give the LayoutLMv2 model an image (of text) and ask it a question. It will give us
1309
+ a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image).
1310
+
1311
+ ```python
1312
+ >>> from transformers import AutoProcessor, LayoutLMv2ForQuestionAnswering, set_seed
1313
+ >>> import torch
1314
+ >>> from PIL import Image
1315
+ >>> from datasets import load_dataset
1316
+
1317
+ >>> set_seed(88)
1318
+ >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
1319
+ >>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased")
1320
+
1321
+ >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
1322
+ >>> image_path = dataset["test"][0]["file"]
1323
+ >>> image = Image.open(image_path).convert("RGB")
1324
+ >>> question = "When is coffee break?"
1325
+ >>> encoding = processor(image, question, return_tensors="pt")
1326
+
1327
+ >>> outputs = model(**encoding)
1328
+ >>> predicted_start_idx = outputs.start_logits.argmax(-1).item()
1329
+ >>> predicted_end_idx = outputs.end_logits.argmax(-1).item()
1330
+ >>> predicted_start_idx, predicted_end_idx
1331
+ (154, 287)
1332
+
1333
+ >>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1]
1334
+ >>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens)
1335
+ >>> predicted_answer # results are not very good without further fine-tuning
1336
+ 'council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public ...
1337
+ ```
1338
+
1339
+ ```python
1340
+ >>> target_start_index = torch.tensor([7])
1341
+ >>> target_end_index = torch.tensor([14])
1342
+ >>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index)
1343
+ >>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item()
1344
+ >>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item()
1345
+ >>> predicted_answer_span_start, predicted_answer_span_end
1346
+ (154, 287)
1347
+ ```
1348
+ """
1349
+
1350
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1351
+
1352
+ outputs = self.layoutlmv2(
1353
+ input_ids=input_ids,
1354
+ bbox=bbox,
1355
+ image=image,
1356
+ attention_mask=attention_mask,
1357
+ token_type_ids=token_type_ids,
1358
+ position_ids=position_ids,
1359
+ head_mask=head_mask,
1360
+ inputs_embeds=inputs_embeds,
1361
+ output_attentions=output_attentions,
1362
+ output_hidden_states=output_hidden_states,
1363
+ return_dict=return_dict,
1364
+ )
1365
+
1366
+ if input_ids is not None:
1367
+ input_shape = input_ids.size()
1368
+ else:
1369
+ input_shape = inputs_embeds.size()[:-1]
1370
+
1371
+ seq_length = input_shape[1]
1372
+ # only take the text part of the output representations
1373
+ sequence_output = outputs[0][:, :seq_length]
1374
+
1375
+ logits = self.qa_outputs(sequence_output)
1376
+ start_logits, end_logits = logits.split(1, dim=-1)
1377
+ start_logits = start_logits.squeeze(-1).contiguous()
1378
+ end_logits = end_logits.squeeze(-1).contiguous()
1379
+
1380
+ total_loss = None
1381
+ if start_positions is not None and end_positions is not None:
1382
+ # If we are on multi-GPU, splitting adds a dimension
1383
+ if len(start_positions.size()) > 1:
1384
+ start_positions = start_positions.squeeze(-1)
1385
+ if len(end_positions.size()) > 1:
1386
+ end_positions = end_positions.squeeze(-1)
1387
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1388
+ ignored_index = start_logits.size(1)
1389
+ start_positions = start_positions.clamp(0, ignored_index)
1390
+ end_positions = end_positions.clamp(0, ignored_index)
1391
+
1392
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1393
+ start_loss = loss_fct(start_logits, start_positions)
1394
+ end_loss = loss_fct(end_logits, end_positions)
1395
+ total_loss = (start_loss + end_loss) / 2
1396
+
1397
+ if not return_dict:
1398
+ output = (start_logits, end_logits) + outputs[2:]
1399
+ return ((total_loss,) + output) if total_loss is not None else output
1400
+
1401
+ return QuestionAnsweringModelOutput(
1402
+ loss=total_loss,
1403
+ start_logits=start_logits,
1404
+ end_logits=end_logits,
1405
+ hidden_states=outputs.hidden_states,
1406
+ attentions=outputs.attentions,
1407
+ )
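The `LayoutLMv2Model.forward` logic at the top of this file converts the 0/1 attention mask into an additive bias via `(1.0 - extended_attention_mask) * torch.finfo(self.dtype).min`. A minimal sketch of what that produces: zeros for attended positions and a very large negative value for masked ones, which disappear after softmax:

```python
import torch

# 0/1 attention mask for one sequence of five tokens where the last two are padding.
attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0, 0.0]])

# Broadcastable shape (batch, 1, 1, seq_len), then flipped into an additive bias:
# 0 where attention is allowed, a huge negative number where it is masked.
extended = attention_mask[:, None, None, :]
bias = (1.0 - extended) * torch.finfo(torch.float32).min

print(bias[0, 0, 0])
# tensor([ 0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38])
```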
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/processing_layoutlmv2.py ADDED
@@ -0,0 +1,201 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for LayoutLMv2.
17
+ """
18
+
19
+ import warnings
20
+ from typing import List, Optional, Union
21
+
22
+ from ...processing_utils import ProcessorMixin
23
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
24
+ from ...utils import TensorType
25
+
26
+
27
+ class LayoutLMv2Processor(ProcessorMixin):
28
+ r"""
29
+ Constructs a LayoutLMv2 processor which combines a LayoutLMv2 image processor and a LayoutLMv2 tokenizer into a
30
+ single processor.
31
+
32
+ [`LayoutLMv2Processor`] offers all the functionalities you need to prepare data for the model.
33
+
34
+ It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
35
+ get words and normalized bounding boxes. These are then provided to [`LayoutLMv2Tokenizer`] or
36
+ [`LayoutLMv2TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
37
+ `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
38
+ into token-level `labels` for token classification tasks (such as FUNSD, CORD).
39
+
40
+ Args:
41
+ image_processor (`LayoutLMv2ImageProcessor`, *optional*):
42
+ An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
43
+ tokenizer (`LayoutLMv2Tokenizer` or `LayoutLMv2TokenizerFast`, *optional*):
44
+ An instance of [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]. The tokenizer is a required input.
45
+ """
46
+
47
+ attributes = ["image_processor", "tokenizer"]
48
+ image_processor_class = "LayoutLMv2ImageProcessor"
49
+ tokenizer_class = ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast")
50
+
51
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
52
+ feature_extractor = None
53
+ if "feature_extractor" in kwargs:
54
+ warnings.warn(
55
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
56
+ " instead.",
57
+ FutureWarning,
58
+ )
59
+ feature_extractor = kwargs.pop("feature_extractor")
60
+
61
+ image_processor = image_processor if image_processor is not None else feature_extractor
62
+ if image_processor is None:
63
+ raise ValueError("You need to specify an `image_processor`.")
64
+ if tokenizer is None:
65
+ raise ValueError("You need to specify a `tokenizer`.")
66
+
67
+ super().__init__(image_processor, tokenizer)
68
+
69
+ def __call__(
70
+ self,
71
+ images,
72
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
73
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
74
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
75
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
76
+ add_special_tokens: bool = True,
77
+ padding: Union[bool, str, PaddingStrategy] = False,
78
+ truncation: Union[bool, str, TruncationStrategy] = False,
79
+ max_length: Optional[int] = None,
80
+ stride: int = 0,
81
+ pad_to_multiple_of: Optional[int] = None,
82
+ return_token_type_ids: Optional[bool] = None,
83
+ return_attention_mask: Optional[bool] = None,
84
+ return_overflowing_tokens: bool = False,
85
+ return_special_tokens_mask: bool = False,
86
+ return_offsets_mapping: bool = False,
87
+ return_length: bool = False,
88
+ verbose: bool = True,
89
+ return_tensors: Optional[Union[str, TensorType]] = None,
90
+ **kwargs,
91
+ ) -> BatchEncoding:
92
+ """
93
+ This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
94
+ [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
95
+ bounding boxes along with the additional arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output,
96
+ together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
97
+ `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
98
+ arguments to [`~LayoutLMv2Tokenizer.__call__`] and returns the output, together with resized `images`.
99
+
100
+ Please refer to the docstring of the above two methods for more information.
101
+ """
102
+ # verify input
103
+ if self.image_processor.apply_ocr and (boxes is not None):
104
+ raise ValueError(
105
+ "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
106
+ )
107
+
108
+ if self.image_processor.apply_ocr and (word_labels is not None):
109
+ raise ValueError(
110
+ "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
111
+ )
112
+
113
+ if return_overflowing_tokens is True and return_offsets_mapping is False:
114
+ raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
115
+
116
+ # first, apply the image processor
117
+ features = self.image_processor(images=images, return_tensors=return_tensors)
118
+
119
+ # second, apply the tokenizer
120
+ if text is not None and self.image_processor.apply_ocr and text_pair is None:
121
+ if isinstance(text, str):
122
+ text = [text] # add batch dimension (as the image processor always adds a batch dimension)
123
+ text_pair = features["words"]
124
+
125
+ encoded_inputs = self.tokenizer(
126
+ text=text if text is not None else features["words"],
127
+ text_pair=text_pair if text_pair is not None else None,
128
+ boxes=boxes if boxes is not None else features["boxes"],
129
+ word_labels=word_labels,
130
+ add_special_tokens=add_special_tokens,
131
+ padding=padding,
132
+ truncation=truncation,
133
+ max_length=max_length,
134
+ stride=stride,
135
+ pad_to_multiple_of=pad_to_multiple_of,
136
+ return_token_type_ids=return_token_type_ids,
137
+ return_attention_mask=return_attention_mask,
138
+ return_overflowing_tokens=return_overflowing_tokens,
139
+ return_special_tokens_mask=return_special_tokens_mask,
140
+ return_offsets_mapping=return_offsets_mapping,
141
+ return_length=return_length,
142
+ verbose=verbose,
143
+ return_tensors=return_tensors,
144
+ **kwargs,
145
+ )
146
+
147
+ # add pixel values
148
+ images = features.pop("pixel_values")
149
+ if return_overflowing_tokens is True:
150
+ images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
151
+ encoded_inputs["image"] = images
152
+
153
+ return encoded_inputs
154
+
155
+ def get_overflowing_images(self, images, overflow_to_sample_mapping):
156
+ # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
157
+ images_with_overflow = []
158
+ for sample_idx in overflow_to_sample_mapping:
159
+ images_with_overflow.append(images[sample_idx])
160
+
161
+ if len(images_with_overflow) != len(overflow_to_sample_mapping):
162
+ raise ValueError(
163
+ "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
164
+ f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
165
+ )
166
+
167
+ return images_with_overflow
168
+
169
+ def batch_decode(self, *args, **kwargs):
170
+ """
171
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
172
+ refer to the docstring of this method for more information.
173
+ """
174
+ return self.tokenizer.batch_decode(*args, **kwargs)
175
+
176
+ def decode(self, *args, **kwargs):
177
+ """
178
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
179
+ to the docstring of this method for more information.
180
+ """
181
+ return self.tokenizer.decode(*args, **kwargs)
182
+
183
+ @property
184
+ def model_input_names(self):
185
+ return ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
186
+
187
+ @property
188
+ def feature_extractor_class(self):
189
+ warnings.warn(
190
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
191
+ FutureWarning,
192
+ )
193
+ return self.image_processor_class
194
+
195
+ @property
196
+ def feature_extractor(self):
197
+ warnings.warn(
198
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
199
+ FutureWarning,
200
+ )
201
+ return self.image_processor
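A hedged usage sketch of the processor defined above, for the `apply_ocr=False` path in which the caller supplies words and 0-1000 normalized boxes. The checkpoint name and example values are illustrative; running this requires downloading `microsoft/layoutlmv2-base-uncased`, and the `apply_ocr=True` path would additionally need `pytesseract` installed:

```python
from PIL import Image
from transformers import LayoutLMv2ImageProcessor, LayoutLMv2Processor, LayoutLMv2Tokenizer

image = Image.new("RGB", (1000, 1000), color="white")  # stand-in for a scanned document page

# apply_ocr=False: the caller supplies the words and their 0-1000 normalized bounding boxes.
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)

words = ["hello", "world"]
boxes = [[10, 20, 30, 40], [50, 60, 70, 80]]  # illustrative, already normalized to 0-1000
encoding = processor(image, words, boxes=boxes, return_tensors="pt")

print(sorted(encoding.keys()))
# ['attention_mask', 'bbox', 'image', 'input_ids', 'token_type_ids']
```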
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/tokenization_layoutlmv2.py ADDED
@@ -0,0 +1,1542 @@
1
+ # coding=utf-8
2
+ # Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization class for LayoutLMv2."""
16
+
17
+ import collections
18
+ import os
19
+ import sys
20
+ import unicodedata
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
24
+ from ...tokenization_utils_base import (
25
+ BatchEncoding,
26
+ EncodedInput,
27
+ PreTokenizedInput,
28
+ TextInput,
29
+ TextInputPair,
30
+ TruncationStrategy,
31
+ )
32
+ from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
38
+
39
+
40
+ LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING = r"""
41
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
42
+ Whether or not to encode the sequences with the special tokens relative to their model.
43
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
44
+ Activates and controls padding. Accepts the following values:
45
+
46
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
47
+ sequence is provided).
48
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
49
+ acceptable input length for the model if that argument is not provided.
50
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
51
+ lengths).
52
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
53
+ Activates and controls truncation. Accepts the following values:
54
+
55
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
56
+ to the maximum acceptable input length for the model if that argument is not provided. This will
57
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
58
+ sequences (or a batch of pairs) is provided.
59
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
60
+ maximum acceptable input length for the model if that argument is not provided. This will only
61
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
62
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
63
+ maximum acceptable input length for the model if that argument is not provided. This will only
64
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
65
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
66
+ greater than the model maximum admissible input size).
67
+ max_length (`int`, *optional*):
68
+ Controls the maximum length to use by one of the truncation/padding parameters.
69
+
70
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
71
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
72
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
73
+ stride (`int`, *optional*, defaults to 0):
74
+ If set to a number along with `max_length`, the overflowing tokens returned when
75
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
76
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
77
+ argument defines the number of overlapping tokens.
78
+ pad_to_multiple_of (`int`, *optional*):
79
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
80
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
81
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
82
+ If set, will return tensors instead of list of python integers. Acceptable values are:
83
+
84
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
85
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
86
+ - `'np'`: Return Numpy `np.ndarray` objects.
87
+ """
88
+
89
+ LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
90
+ return_token_type_ids (`bool`, *optional*):
91
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
92
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
93
+
94
+ [What are token type IDs?](../glossary#token-type-ids)
95
+ return_attention_mask (`bool`, *optional*):
96
+ Whether to return the attention mask. If left to the default, will return the attention mask according
97
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
98
+
99
+ [What are attention masks?](../glossary#attention-mask)
100
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
101
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
102
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
103
+ of returning overflowing tokens.
104
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
105
+ Whether or not to return special tokens mask information.
106
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to return `(char_start, char_end)` for each token.
108
+
109
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`]. If using
110
+ Python's tokenizer, this method will raise `NotImplementedError`.
111
+ return_length (`bool`, *optional*, defaults to `False`):
112
+ Whether or not to return the lengths of the encoded inputs.
113
+ verbose (`bool`, *optional*, defaults to `True`):
114
+ Whether or not to print more information and warnings.
115
+ **kwargs: passed to the `self.tokenize()` method
116
+
117
+ Return:
118
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
119
+
120
+ - **input_ids** -- List of token ids to be fed to a model.
121
+
122
+ [What are input IDs?](../glossary#input-ids)
123
+
124
+ - **bbox** -- List of bounding boxes to be fed to a model.
125
+
126
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
127
+ if *"token_type_ids"* is in `self.model_input_names`).
128
+
129
+ [What are token type IDs?](../glossary#token-type-ids)
130
+
131
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
132
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
133
+
134
+ [What are attention masks?](../glossary#attention-mask)
135
+
136
+ - **labels** -- List of labels to be fed to a model (when `word_labels` is specified).
137
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
138
+ `return_overflowing_tokens=True`).
139
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
140
+ `return_overflowing_tokens=True`).
141
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
142
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
143
+ - **length** -- The length of the inputs (when `return_length=True`).
144
+ """
145
+
146
+
147
+ def load_vocab(vocab_file):
148
+ """Loads a vocabulary file into a dictionary."""
149
+ vocab = collections.OrderedDict()
150
+ with open(vocab_file, "r", encoding="utf-8") as reader:
151
+ tokens = reader.readlines()
152
+ for index, token in enumerate(tokens):
153
+ token = token.rstrip("\n")
154
+ vocab[token] = index
155
+ return vocab
156
+
157
+
158
+ def whitespace_tokenize(text):
159
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
160
+ text = text.strip()
161
+ if not text:
162
+ return []
163
+ tokens = text.split()
164
+ return tokens
165
+
166
+
167
+ table = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P"))
168
+
169
+
170
+ def subfinder(mylist, pattern):
171
+ matches = []
172
+ indices = []
173
+ for idx in range(len(mylist)):
174
+ if mylist[idx] == pattern[0] and mylist[idx : idx + len(pattern)] == pattern:
175
+ matches.append(pattern)
176
+ indices.append(idx)
177
+ if matches:
178
+ return matches[0], indices[0]
179
+ else:
180
+ return None, 0
181
+
182
+
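A quick illustration of what `subfinder` returns: the first occurrence of `pattern` as a contiguous sublist of `mylist`, together with its start index, or `(None, 0)` when there is no match. This is a hedged sketch that assumes the `subfinder` defined above is in scope; the word lists are made up:

```python
words = ["what", "is", "the", "total", "amount", "due"]

match, start_index = subfinder(words, ["total", "amount"])
print(match, start_index)  # ['total', 'amount'] 3

match, start_index = subfinder(words, ["missing"])
print(match, start_index)  # None 0
```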
183
+ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
184
+ r"""
185
+ Construct a LayoutLMv2 tokenizer. Based on WordPiece. [`LayoutLMv2Tokenizer`] can be used to turn words, word-level
186
+ bounding boxes and optional word labels into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and
187
+ optional `labels` (for token classification).
188
+
189
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
190
+ this superclass for more information regarding those methods.
191
+
192
+ [`LayoutLMv2Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
193
+ word-level bounding boxes into token-level bounding boxes.
194
+
195
+ """
196
+
197
+ vocab_files_names = VOCAB_FILES_NAMES
198
+
199
+ def __init__(
200
+ self,
201
+ vocab_file,
202
+ do_lower_case=True,
203
+ do_basic_tokenize=True,
204
+ never_split=None,
205
+ unk_token="[UNK]",
206
+ sep_token="[SEP]",
207
+ pad_token="[PAD]",
208
+ cls_token="[CLS]",
209
+ mask_token="[MASK]",
210
+ cls_token_box=[0, 0, 0, 0],
211
+ sep_token_box=[1000, 1000, 1000, 1000],
212
+ pad_token_box=[0, 0, 0, 0],
213
+ pad_token_label=-100,
214
+ only_label_first_subword=True,
215
+ tokenize_chinese_chars=True,
216
+ strip_accents=None,
217
+ model_max_length: int = 512,
218
+ additional_special_tokens: Optional[List[str]] = None,
219
+ **kwargs,
220
+ ):
221
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
222
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
223
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
224
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
225
+ mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token
226
+
227
+ if not os.path.isfile(vocab_file):
228
+ raise ValueError(
229
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
230
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
231
+ )
232
+ self.vocab = load_vocab(vocab_file)
233
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
234
+ self.do_basic_tokenize = do_basic_tokenize
235
+ if do_basic_tokenize:
236
+ self.basic_tokenizer = BasicTokenizer(
237
+ do_lower_case=do_lower_case,
238
+ never_split=never_split,
239
+ tokenize_chinese_chars=tokenize_chinese_chars,
240
+ strip_accents=strip_accents,
241
+ )
242
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
243
+
244
+ # additional properties
245
+ self.cls_token_box = cls_token_box
246
+ self.sep_token_box = sep_token_box
247
+ self.pad_token_box = pad_token_box
248
+ self.pad_token_label = pad_token_label
249
+ self.only_label_first_subword = only_label_first_subword
250
+ super().__init__(
251
+ do_lower_case=do_lower_case,
252
+ do_basic_tokenize=do_basic_tokenize,
253
+ never_split=never_split,
254
+ unk_token=unk_token,
255
+ sep_token=sep_token,
256
+ pad_token=pad_token,
257
+ cls_token=cls_token,
258
+ mask_token=mask_token,
259
+ cls_token_box=cls_token_box,
260
+ sep_token_box=sep_token_box,
261
+ pad_token_box=pad_token_box,
262
+ pad_token_label=pad_token_label,
263
+ only_label_first_subword=only_label_first_subword,
264
+ tokenize_chinese_chars=tokenize_chinese_chars,
265
+ strip_accents=strip_accents,
266
+ model_max_length=model_max_length,
267
+ additional_special_tokens=additional_special_tokens,
268
+ **kwargs,
269
+ )
270
+
271
+ @property
272
+ def do_lower_case(self):
273
+ return self.basic_tokenizer.do_lower_case
274
+
275
+ @property
276
+ def vocab_size(self):
277
+ return len(self.vocab)
278
+
279
+ def get_vocab(self):
280
+ return dict(self.vocab, **self.added_tokens_encoder)
281
+
282
+ def _tokenize(self, text):
283
+ split_tokens = []
284
+ if self.do_basic_tokenize:
285
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
286
+ # If the token is part of the never_split set
287
+ if token in self.basic_tokenizer.never_split:
288
+ split_tokens.append(token)
289
+ else:
290
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
291
+ else:
292
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
293
+ return split_tokens
294
+
295
+ def _convert_token_to_id(self, token):
296
+ """Converts a token (str) in an id using the vocab."""
297
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
298
+
299
+ def _convert_id_to_token(self, index):
300
+ """Converts an index (integer) in a token (str) using the vocab."""
301
+ return self.ids_to_tokens.get(index, self.unk_token)
302
+
303
+ def convert_tokens_to_string(self, tokens):
304
+ """Converts a sequence of tokens (string) in a single string."""
305
+ out_string = " ".join(tokens).replace(" ##", "").strip()
306
+ return out_string
307
+
308
+ def build_inputs_with_special_tokens(
309
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
310
+ ) -> List[int]:
311
+ """
312
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
313
+ adding special tokens. A BERT sequence has the following format:
314
+
315
+ - single sequence: `[CLS] X [SEP]`
316
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
317
+
318
+ Args:
319
+ token_ids_0 (`List[int]`):
320
+ List of IDs to which the special tokens will be added.
321
+ token_ids_1 (`List[int]`, *optional*):
322
+ Optional second list of IDs for sequence pairs.
323
+
324
+ Returns:
325
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
326
+ """
327
+ if token_ids_1 is None:
328
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
329
+ cls = [self.cls_token_id]
330
+ sep = [self.sep_token_id]
331
+ return cls + token_ids_0 + sep + token_ids_1 + sep
332
+
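A pure-Python sketch of the `[CLS] X [SEP]` and `[CLS] A [SEP] B [SEP]` layouts that `build_inputs_with_special_tokens` above produces; the token ids are illustrative stand-ins, not values from a real vocabulary file:

```python
cls_token_id, sep_token_id = 101, 102  # illustrative ids
token_ids_0 = [7592, 2088]             # e.g. "hello world"
token_ids_1 = [2129, 2024, 2017]       # e.g. "how are you"

single = [cls_token_id] + token_ids_0 + [sep_token_id]
pair = [cls_token_id] + token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]

print(single)  # [101, 7592, 2088, 102]
print(pair)    # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
```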
333
+ def get_special_tokens_mask(
334
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
335
+ ) -> List[int]:
336
+ """
337
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
338
+ special tokens using the tokenizer `prepare_for_model` method.
339
+
340
+ Args:
341
+ token_ids_0 (`List[int]`):
342
+ List of IDs.
343
+ token_ids_1 (`List[int]`, *optional*):
344
+ Optional second list of IDs for sequence pairs.
345
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
346
+ Whether or not the token list is already formatted with special tokens for the model.
347
+
348
+ Returns:
349
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
350
+ """
351
+
352
+ if already_has_special_tokens:
353
+ return super().get_special_tokens_mask(
354
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
355
+ )
356
+
357
+ if token_ids_1 is not None:
358
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
359
+ return [1] + ([0] * len(token_ids_0)) + [1]
360
+
361
+ def create_token_type_ids_from_sequences(
362
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
363
+ ) -> List[int]:
364
+ """
365
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
366
+ pair mask has the following format:
+
+     0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+     | first sequence    | second sequence |
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
368
+
369
+ Args:
370
+ token_ids_0 (`List[int]`):
371
+ List of IDs.
372
+ token_ids_1 (`List[int]`, *optional*):
373
+ Optional second list of IDs for sequence pairs.
374
+
375
+ Returns:
376
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
377
+ """
378
+ sep = [self.sep_token_id]
379
+ cls = [self.cls_token_id]
380
+ if token_ids_1 is None:
381
+ return len(cls + token_ids_0 + sep) * [0]
382
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
383
+
384
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
385
+ index = 0
386
+ if os.path.isdir(save_directory):
387
+ vocab_file = os.path.join(
388
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
389
+ )
390
+ else:
391
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
392
+ with open(vocab_file, "w", encoding="utf-8") as writer:
393
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
394
+ if index != token_index:
395
+ logger.warning(
396
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
397
+ " Please check that the vocabulary is not corrupted!"
398
+ )
399
+ index = token_index
400
+ writer.write(token + "\n")
401
+ index += 1
402
+ return (vocab_file,)
403
+
404
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
405
+ def __call__(
406
+ self,
407
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
408
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
409
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
410
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
411
+ add_special_tokens: bool = True,
412
+ padding: Union[bool, str, PaddingStrategy] = False,
413
+ truncation: Union[bool, str, TruncationStrategy] = None,
414
+ max_length: Optional[int] = None,
415
+ stride: int = 0,
416
+ pad_to_multiple_of: Optional[int] = None,
417
+ return_tensors: Optional[Union[str, TensorType]] = None,
418
+ return_token_type_ids: Optional[bool] = None,
419
+ return_attention_mask: Optional[bool] = None,
420
+ return_overflowing_tokens: bool = False,
421
+ return_special_tokens_mask: bool = False,
422
+ return_offsets_mapping: bool = False,
423
+ return_length: bool = False,
424
+ verbose: bool = True,
425
+ **kwargs,
426
+ ) -> BatchEncoding:
427
+ """
428
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
429
+ sequences with word-level normalized bounding boxes and optional labels.
430
+
431
+ Args:
432
+ text (`str`, `List[str]`, `List[List[str]]`):
433
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
434
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
435
+ words).
436
+ text_pair (`List[str]`, `List[List[str]]`):
437
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
438
+ (pretokenized string).
439
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
440
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
441
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
442
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
443
+ """
444
+
445
+ # Input type checking for clearer error
446
+ def _is_valid_text_input(t):
447
+ if isinstance(t, str):
448
+ # Strings are fine
449
+ return True
450
+ elif isinstance(t, (list, tuple)):
451
+ # List are fine as long as they are...
452
+ if len(t) == 0:
453
+ # ... empty
454
+ return True
455
+ elif isinstance(t[0], str):
456
+ # ... list of strings
457
+ return True
458
+ elif isinstance(t[0], (list, tuple)):
459
+ # ... list with an empty list or with a list of strings
460
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
461
+ else:
462
+ return False
463
+ else:
464
+ return False
465
+
466
+ if text_pair is not None:
467
+ # in case text + text_pair are provided, text = questions, text_pair = words
468
+ if not _is_valid_text_input(text):
469
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
470
+ if not isinstance(text_pair, (list, tuple)):
471
+ raise ValueError(
472
+ "Words must be of type `List[str]` (single pretokenized example), "
473
+ "or `List[List[str]]` (batch of pretokenized examples)."
474
+ )
475
+ else:
476
+ # in case only text is provided => must be words
477
+ if not isinstance(text, (list, tuple)):
478
+ raise ValueError(
479
+ "Words must be of type `List[str]` (single pretokenized example), "
480
+ "or `List[List[str]]` (batch of pretokenized examples)."
481
+ )
482
+
483
+ if text_pair is not None:
484
+ is_batched = isinstance(text, (list, tuple))
485
+ else:
486
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
487
+
488
+ words = text if text_pair is None else text_pair
489
+ if boxes is None:
490
+ raise ValueError("You must provide corresponding bounding boxes")
491
+ if is_batched:
492
+ if len(words) != len(boxes):
493
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
494
+ for words_example, boxes_example in zip(words, boxes):
495
+ if len(words_example) != len(boxes_example):
496
+ raise ValueError("You must provide as many words as there are bounding boxes")
497
+ else:
498
+ if len(words) != len(boxes):
499
+ raise ValueError("You must provide as many words as there are bounding boxes")
500
+
501
+ if is_batched:
502
+ if text_pair is not None and len(text) != len(text_pair):
503
+ raise ValueError(
504
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
505
+ f" {len(text_pair)}."
506
+ )
507
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
508
+ is_pair = bool(text_pair is not None)
509
+ return self.batch_encode_plus(
510
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
511
+ is_pair=is_pair,
512
+ boxes=boxes,
513
+ word_labels=word_labels,
514
+ add_special_tokens=add_special_tokens,
515
+ padding=padding,
516
+ truncation=truncation,
517
+ max_length=max_length,
518
+ stride=stride,
519
+ pad_to_multiple_of=pad_to_multiple_of,
520
+ return_tensors=return_tensors,
521
+ return_token_type_ids=return_token_type_ids,
522
+ return_attention_mask=return_attention_mask,
523
+ return_overflowing_tokens=return_overflowing_tokens,
524
+ return_special_tokens_mask=return_special_tokens_mask,
525
+ return_offsets_mapping=return_offsets_mapping,
526
+ return_length=return_length,
527
+ verbose=verbose,
528
+ **kwargs,
529
+ )
530
+ else:
531
+ return self.encode_plus(
532
+ text=text,
533
+ text_pair=text_pair,
534
+ boxes=boxes,
535
+ word_labels=word_labels,
536
+ add_special_tokens=add_special_tokens,
537
+ padding=padding,
538
+ truncation=truncation,
539
+ max_length=max_length,
540
+ stride=stride,
541
+ pad_to_multiple_of=pad_to_multiple_of,
542
+ return_tensors=return_tensors,
543
+ return_token_type_ids=return_token_type_ids,
544
+ return_attention_mask=return_attention_mask,
545
+ return_overflowing_tokens=return_overflowing_tokens,
546
+ return_special_tokens_mask=return_special_tokens_mask,
547
+ return_offsets_mapping=return_offsets_mapping,
548
+ return_length=return_length,
549
+ verbose=verbose,
550
+ **kwargs,
551
+ )
552
+
553
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
554
+ def batch_encode_plus(
555
+ self,
556
+ batch_text_or_text_pairs: Union[
557
+ List[TextInput],
558
+ List[TextInputPair],
559
+ List[PreTokenizedInput],
560
+ ],
561
+ is_pair: bool = None,
562
+ boxes: Optional[List[List[List[int]]]] = None,
563
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
564
+ add_special_tokens: bool = True,
565
+ padding: Union[bool, str, PaddingStrategy] = False,
566
+ truncation: Union[bool, str, TruncationStrategy] = None,
567
+ max_length: Optional[int] = None,
568
+ stride: int = 0,
569
+ pad_to_multiple_of: Optional[int] = None,
570
+ return_tensors: Optional[Union[str, TensorType]] = None,
571
+ return_token_type_ids: Optional[bool] = None,
572
+ return_attention_mask: Optional[bool] = None,
573
+ return_overflowing_tokens: bool = False,
574
+ return_special_tokens_mask: bool = False,
575
+ return_offsets_mapping: bool = False,
576
+ return_length: bool = False,
577
+ verbose: bool = True,
578
+ **kwargs,
579
+ ) -> BatchEncoding:
580
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
581
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
582
+ padding=padding,
583
+ truncation=truncation,
584
+ max_length=max_length,
585
+ pad_to_multiple_of=pad_to_multiple_of,
586
+ verbose=verbose,
587
+ **kwargs,
588
+ )
589
+
590
+ return self._batch_encode_plus(
591
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
592
+ is_pair=is_pair,
593
+ boxes=boxes,
594
+ word_labels=word_labels,
595
+ add_special_tokens=add_special_tokens,
596
+ padding_strategy=padding_strategy,
597
+ truncation_strategy=truncation_strategy,
598
+ max_length=max_length,
599
+ stride=stride,
600
+ pad_to_multiple_of=pad_to_multiple_of,
601
+ return_tensors=return_tensors,
602
+ return_token_type_ids=return_token_type_ids,
603
+ return_attention_mask=return_attention_mask,
604
+ return_overflowing_tokens=return_overflowing_tokens,
605
+ return_special_tokens_mask=return_special_tokens_mask,
606
+ return_offsets_mapping=return_offsets_mapping,
607
+ return_length=return_length,
608
+ verbose=verbose,
609
+ **kwargs,
610
+ )
611
+
612
+ def _batch_encode_plus(
613
+ self,
614
+ batch_text_or_text_pairs: Union[
615
+ List[TextInput],
616
+ List[TextInputPair],
617
+ List[PreTokenizedInput],
618
+ ],
619
+ is_pair: bool = None,
620
+ boxes: Optional[List[List[List[int]]]] = None,
621
+ word_labels: Optional[List[List[int]]] = None,
622
+ add_special_tokens: bool = True,
623
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
624
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
625
+ max_length: Optional[int] = None,
626
+ stride: int = 0,
627
+ pad_to_multiple_of: Optional[int] = None,
628
+ return_tensors: Optional[Union[str, TensorType]] = None,
629
+ return_token_type_ids: Optional[bool] = None,
630
+ return_attention_mask: Optional[bool] = None,
631
+ return_overflowing_tokens: bool = False,
632
+ return_special_tokens_mask: bool = False,
633
+ return_offsets_mapping: bool = False,
634
+ return_length: bool = False,
635
+ verbose: bool = True,
636
+ **kwargs,
637
+ ) -> BatchEncoding:
638
+ if return_offsets_mapping:
639
+ raise NotImplementedError(
640
+ "return_offset_mapping is not available when using Python tokenizers. "
641
+ "To use this feature, change your tokenizer to one deriving from "
642
+ "transformers.PreTrainedTokenizerFast."
643
+ )
644
+
645
+ batch_outputs = self._batch_prepare_for_model(
646
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
647
+ is_pair=is_pair,
648
+ boxes=boxes,
649
+ word_labels=word_labels,
650
+ add_special_tokens=add_special_tokens,
651
+ padding_strategy=padding_strategy,
652
+ truncation_strategy=truncation_strategy,
653
+ max_length=max_length,
654
+ stride=stride,
655
+ pad_to_multiple_of=pad_to_multiple_of,
656
+ return_attention_mask=return_attention_mask,
657
+ return_token_type_ids=return_token_type_ids,
658
+ return_overflowing_tokens=return_overflowing_tokens,
659
+ return_special_tokens_mask=return_special_tokens_mask,
660
+ return_length=return_length,
661
+ return_tensors=return_tensors,
662
+ verbose=verbose,
663
+ )
664
+
665
+ return BatchEncoding(batch_outputs)
666
+
667
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
668
+ def _batch_prepare_for_model(
669
+ self,
670
+ batch_text_or_text_pairs,
671
+ is_pair: bool = None,
672
+ boxes: Optional[List[List[int]]] = None,
673
+ word_labels: Optional[List[List[int]]] = None,
674
+ add_special_tokens: bool = True,
675
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
676
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
677
+ max_length: Optional[int] = None,
678
+ stride: int = 0,
679
+ pad_to_multiple_of: Optional[int] = None,
680
+ return_tensors: Optional[str] = None,
681
+ return_token_type_ids: Optional[bool] = None,
682
+ return_attention_mask: Optional[bool] = None,
683
+ return_overflowing_tokens: bool = False,
684
+ return_special_tokens_mask: bool = False,
685
+ return_length: bool = False,
686
+ verbose: bool = True,
687
+ ) -> BatchEncoding:
688
+ """
689
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
690
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
691
+ manages a moving window (with user defined stride) for overflowing tokens.
692
+
693
+ Args:
694
+ batch_text_or_text_pairs: list of pre-tokenized words (or question/words pairs) to prepare for the model
695
+ """
696
+
697
+ batch_outputs = {}
698
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
699
+ batch_text_or_text_pair, boxes_example = example
700
+ outputs = self.prepare_for_model(
701
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
702
+ batch_text_or_text_pair[1] if is_pair else None,
703
+ boxes_example,
704
+ word_labels=word_labels[idx] if word_labels is not None else None,
705
+ add_special_tokens=add_special_tokens,
706
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
707
+ truncation=truncation_strategy.value,
708
+ max_length=max_length,
709
+ stride=stride,
710
+ pad_to_multiple_of=None, # we pad in batch afterward
711
+ return_attention_mask=False, # we pad in batch afterward
712
+ return_token_type_ids=return_token_type_ids,
713
+ return_overflowing_tokens=return_overflowing_tokens,
714
+ return_special_tokens_mask=return_special_tokens_mask,
715
+ return_length=return_length,
716
+ return_tensors=None, # We convert the whole batch to tensors at the end
717
+ prepend_batch_axis=False,
718
+ verbose=verbose,
719
+ )
720
+
721
+ for key, value in outputs.items():
722
+ if key not in batch_outputs:
723
+ batch_outputs[key] = []
724
+ batch_outputs[key].append(value)
725
+
726
+ batch_outputs = self.pad(
727
+ batch_outputs,
728
+ padding=padding_strategy.value,
729
+ max_length=max_length,
730
+ pad_to_multiple_of=pad_to_multiple_of,
731
+ return_attention_mask=return_attention_mask,
732
+ )
733
+
734
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
735
+
736
+ return batch_outputs
737
+
738
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING)
739
+ def encode(
740
+ self,
741
+ text: Union[TextInput, PreTokenizedInput],
742
+ text_pair: Optional[PreTokenizedInput] = None,
743
+ boxes: Optional[List[List[int]]] = None,
744
+ word_labels: Optional[List[int]] = None,
745
+ add_special_tokens: bool = True,
746
+ padding: Union[bool, str, PaddingStrategy] = False,
747
+ truncation: Union[bool, str, TruncationStrategy] = None,
748
+ max_length: Optional[int] = None,
749
+ stride: int = 0,
750
+ pad_to_multiple_of: Optional[int] = None,
751
+ return_tensors: Optional[Union[str, TensorType]] = None,
752
+ return_token_type_ids: Optional[bool] = None,
753
+ return_attention_mask: Optional[bool] = None,
754
+ return_overflowing_tokens: bool = False,
755
+ return_special_tokens_mask: bool = False,
756
+ return_offsets_mapping: bool = False,
757
+ return_length: bool = False,
758
+ verbose: bool = True,
759
+ **kwargs,
760
+ ) -> List[int]:
761
+ encoded_inputs = self.encode_plus(
762
+ text=text,
763
+ text_pair=text_pair,
764
+ boxes=boxes,
765
+ word_labels=word_labels,
766
+ add_special_tokens=add_special_tokens,
767
+ padding=padding,
768
+ truncation=truncation,
769
+ max_length=max_length,
770
+ stride=stride,
771
+ pad_to_multiple_of=pad_to_multiple_of,
772
+ return_tensors=return_tensors,
773
+ return_token_type_ids=return_token_type_ids,
774
+ return_attention_mask=return_attention_mask,
775
+ return_overflowing_tokens=return_overflowing_tokens,
776
+ return_special_tokens_mask=return_special_tokens_mask,
777
+ return_offsets_mapping=return_offsets_mapping,
778
+ return_length=return_length,
779
+ verbose=verbose,
780
+ **kwargs,
781
+ )
782
+
783
+ return encoded_inputs["input_ids"]
784
+
785
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
786
+ def encode_plus(
787
+ self,
788
+ text: Union[TextInput, PreTokenizedInput],
789
+ text_pair: Optional[PreTokenizedInput] = None,
790
+ boxes: Optional[List[List[int]]] = None,
791
+ word_labels: Optional[List[int]] = None,
792
+ add_special_tokens: bool = True,
793
+ padding: Union[bool, str, PaddingStrategy] = False,
794
+ truncation: Union[bool, str, TruncationStrategy] = None,
795
+ max_length: Optional[int] = None,
796
+ stride: int = 0,
797
+ pad_to_multiple_of: Optional[int] = None,
798
+ return_tensors: Optional[Union[str, TensorType]] = None,
799
+ return_token_type_ids: Optional[bool] = None,
800
+ return_attention_mask: Optional[bool] = None,
801
+ return_overflowing_tokens: bool = False,
802
+ return_special_tokens_mask: bool = False,
803
+ return_offsets_mapping: bool = False,
804
+ return_length: bool = False,
805
+ verbose: bool = True,
806
+ **kwargs,
807
+ ) -> BatchEncoding:
808
+ """
809
+ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated;
810
+ `__call__` should be used instead.
811
+
812
+ Args:
813
+ text (`str`, `List[str]`, `List[List[str]]`):
814
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
815
+ text_pair (`List[str]` or `List[int]`, *optional*):
816
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
817
+ list of list of strings (words of a batch of examples).
818
+ """
819
+
820
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
821
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
822
+ padding=padding,
823
+ truncation=truncation,
824
+ max_length=max_length,
825
+ pad_to_multiple_of=pad_to_multiple_of,
826
+ verbose=verbose,
827
+ **kwargs,
828
+ )
829
+
830
+ return self._encode_plus(
831
+ text=text,
832
+ boxes=boxes,
833
+ text_pair=text_pair,
834
+ word_labels=word_labels,
835
+ add_special_tokens=add_special_tokens,
836
+ padding_strategy=padding_strategy,
837
+ truncation_strategy=truncation_strategy,
838
+ max_length=max_length,
839
+ stride=stride,
840
+ pad_to_multiple_of=pad_to_multiple_of,
841
+ return_tensors=return_tensors,
842
+ return_token_type_ids=return_token_type_ids,
843
+ return_attention_mask=return_attention_mask,
844
+ return_overflowing_tokens=return_overflowing_tokens,
845
+ return_special_tokens_mask=return_special_tokens_mask,
846
+ return_offsets_mapping=return_offsets_mapping,
847
+ return_length=return_length,
848
+ verbose=verbose,
849
+ **kwargs,
850
+ )
851
+
852
+ def _encode_plus(
853
+ self,
854
+ text: Union[TextInput, PreTokenizedInput],
855
+ text_pair: Optional[PreTokenizedInput] = None,
856
+ boxes: Optional[List[List[int]]] = None,
857
+ word_labels: Optional[List[int]] = None,
858
+ add_special_tokens: bool = True,
859
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
860
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
861
+ max_length: Optional[int] = None,
862
+ stride: int = 0,
863
+ pad_to_multiple_of: Optional[int] = None,
864
+ return_tensors: Optional[Union[str, TensorType]] = None,
865
+ return_token_type_ids: Optional[bool] = None,
866
+ return_attention_mask: Optional[bool] = None,
867
+ return_overflowing_tokens: bool = False,
868
+ return_special_tokens_mask: bool = False,
869
+ return_offsets_mapping: bool = False,
870
+ return_length: bool = False,
871
+ verbose: bool = True,
872
+ **kwargs,
873
+ ) -> BatchEncoding:
874
+ if return_offsets_mapping:
875
+ raise NotImplementedError(
876
+ "return_offset_mapping is not available when using Python tokenizers. "
877
+ "To use this feature, change your tokenizer to one deriving from "
878
+ "transformers.PreTrainedTokenizerFast. "
879
+ "More information on available tokenizers at "
880
+ "https://github.com/huggingface/transformers/pull/2674"
881
+ )
882
+
883
+ return self.prepare_for_model(
884
+ text=text,
885
+ text_pair=text_pair,
886
+ boxes=boxes,
887
+ word_labels=word_labels,
888
+ add_special_tokens=add_special_tokens,
889
+ padding=padding_strategy.value,
890
+ truncation=truncation_strategy.value,
891
+ max_length=max_length,
892
+ stride=stride,
893
+ pad_to_multiple_of=pad_to_multiple_of,
894
+ return_tensors=return_tensors,
895
+ prepend_batch_axis=True,
896
+ return_attention_mask=return_attention_mask,
897
+ return_token_type_ids=return_token_type_ids,
898
+ return_overflowing_tokens=return_overflowing_tokens,
899
+ return_special_tokens_mask=return_special_tokens_mask,
900
+ return_length=return_length,
901
+ verbose=verbose,
902
+ )
903
+
904
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
905
+ def prepare_for_model(
906
+ self,
907
+ text: Union[TextInput, PreTokenizedInput],
908
+ text_pair: Optional[PreTokenizedInput] = None,
909
+ boxes: Optional[List[List[int]]] = None,
910
+ word_labels: Optional[List[int]] = None,
911
+ add_special_tokens: bool = True,
912
+ padding: Union[bool, str, PaddingStrategy] = False,
913
+ truncation: Union[bool, str, TruncationStrategy] = None,
914
+ max_length: Optional[int] = None,
915
+ stride: int = 0,
916
+ pad_to_multiple_of: Optional[int] = None,
917
+ return_tensors: Optional[Union[str, TensorType]] = None,
918
+ return_token_type_ids: Optional[bool] = None,
919
+ return_attention_mask: Optional[bool] = None,
920
+ return_overflowing_tokens: bool = False,
921
+ return_special_tokens_mask: bool = False,
922
+ return_offsets_mapping: bool = False,
923
+ return_length: bool = False,
924
+ verbose: bool = True,
925
+ prepend_batch_axis: bool = False,
926
+ **kwargs,
927
+ ) -> BatchEncoding:
928
+ """
929
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
930
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
931
+ (with user defined stride) for overflowing tokens. Please note that for *text_pair* different from `None` and
932
+ *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
933
+ combination of arguments will raise an error.
934
+
935
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
936
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
937
+ labeled with -100, such that they will be ignored by the loss function.
938
+
939
+ Args:
940
+ text (`str`, `List[str]`, `List[List[str]]`):
941
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
942
+ text_pair (`List[str]` or `List[int]`, *optional*):
943
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
944
+ list of list of strings (words of a batch of examples).
945
+ """
946
+
947
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
948
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
949
+ padding=padding,
950
+ truncation=truncation,
951
+ max_length=max_length,
952
+ pad_to_multiple_of=pad_to_multiple_of,
953
+ verbose=verbose,
954
+ **kwargs,
955
+ )
956
+
957
+ tokens = []
958
+ pair_tokens = []
959
+ token_boxes = []
960
+ pair_token_boxes = []
961
+ labels = []
962
+
963
+ if text_pair is None:
964
+ if word_labels is None:
965
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
966
+ for word, box in zip(text, boxes):
967
+ if len(word) < 1: # skip empty words
968
+ continue
969
+ word_tokens = self.tokenize(word)
970
+ tokens.extend(word_tokens)
971
+ token_boxes.extend([box] * len(word_tokens))
972
+ else:
973
+ # CASE 2: token classification (training)
974
+ for word, box, label in zip(text, boxes, word_labels):
975
+ if len(word) < 1: # skip empty words
976
+ continue
977
+ word_tokens = self.tokenize(word)
978
+ tokens.extend(word_tokens)
979
+ token_boxes.extend([box] * len(word_tokens))
980
+ if self.only_label_first_subword:
981
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
982
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
983
+ else:
984
+ labels.extend([label] * len(word_tokens))
985
+ else:
986
+ # CASE 3: document visual question answering (inference)
987
+ # text = question
988
+ # text_pair = words
989
+ tokens = self.tokenize(text)
990
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))]
991
+
992
+ for word, box in zip(text_pair, boxes):
993
+ if len(word) < 1: # skip empty words
994
+ continue
995
+ word_tokens = self.tokenize(word)
996
+ pair_tokens.extend(word_tokens)
997
+ pair_token_boxes.extend([box] * len(word_tokens))
998
+
999
+ # Create ids + pair_ids
1000
+ ids = self.convert_tokens_to_ids(tokens)
1001
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
1002
+
1003
+ if (
1004
+ return_overflowing_tokens
1005
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
1006
+ and pair_ids is not None
1007
+ ):
1008
+ raise ValueError(
1009
+ "Not possible to return overflowing tokens for pair of sequences with the "
1010
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
1011
+ "for instance `only_second` or `only_first`."
1012
+ )
1013
+
1014
+ # Compute the total size of the returned encodings
1015
+ pair = bool(pair_ids is not None)
1016
+ len_ids = len(ids)
1017
+ len_pair_ids = len(pair_ids) if pair else 0
1018
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
1019
+
1020
+ # Truncation: Handle max sequence length
1021
+ overflowing_tokens = []
1022
+ overflowing_token_boxes = []
1023
+ overflowing_labels = []
1024
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
1025
+ (
1026
+ ids,
1027
+ token_boxes,
1028
+ pair_ids,
1029
+ pair_token_boxes,
1030
+ labels,
1031
+ overflowing_tokens,
1032
+ overflowing_token_boxes,
1033
+ overflowing_labels,
1034
+ ) = self.truncate_sequences(
1035
+ ids,
1036
+ token_boxes,
1037
+ pair_ids=pair_ids,
1038
+ pair_token_boxes=pair_token_boxes,
1039
+ labels=labels,
1040
+ num_tokens_to_remove=total_len - max_length,
1041
+ truncation_strategy=truncation_strategy,
1042
+ stride=stride,
1043
+ )
1044
+
1045
+ if return_token_type_ids and not add_special_tokens:
1046
+ raise ValueError(
1047
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1048
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1049
+ "set return_token_type_ids to None."
1050
+ )
1051
+
1052
+ # Load from model defaults
1053
+ if return_token_type_ids is None:
1054
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1055
+ if return_attention_mask is None:
1056
+ return_attention_mask = "attention_mask" in self.model_input_names
1057
+
1058
+ encoded_inputs = {}
1059
+
1060
+ if return_overflowing_tokens:
1061
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
1062
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
1063
+ encoded_inputs["overflowing_labels"] = overflowing_labels
1064
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
1065
+
1066
+ # Add special tokens
1067
+ if add_special_tokens:
1068
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1069
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
1070
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
1071
+ if pair_token_boxes:
1072
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
1073
+ if labels:
1074
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
1075
+ else:
1076
+ sequence = ids + pair_ids if pair else ids
1077
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
1078
+
1079
+ # Build output dictionary
1080
+ encoded_inputs["input_ids"] = sequence
1081
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
1082
+ if return_token_type_ids:
1083
+ encoded_inputs["token_type_ids"] = token_type_ids
1084
+ if return_special_tokens_mask:
1085
+ if add_special_tokens:
1086
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
1087
+ else:
1088
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1089
+
1090
+ if labels:
1091
+ encoded_inputs["labels"] = labels
1092
+
1093
+ # Check lengths
1094
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
1095
+
1096
+ # Padding
1097
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
1098
+ encoded_inputs = self.pad(
1099
+ encoded_inputs,
1100
+ max_length=max_length,
1101
+ padding=padding_strategy.value,
1102
+ pad_to_multiple_of=pad_to_multiple_of,
1103
+ return_attention_mask=return_attention_mask,
1104
+ )
1105
+
1106
+ if return_length:
1107
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1108
+
1109
+ batch_outputs = BatchEncoding(
1110
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
1111
+ )
1112
+
1113
+ return batch_outputs
1114
+
1115
+ def truncate_sequences(
1116
+ self,
1117
+ ids: List[int],
1118
+ token_boxes: List[List[int]],
1119
+ pair_ids: Optional[List[int]] = None,
1120
+ pair_token_boxes: Optional[List[List[int]]] = None,
1121
+ labels: Optional[List[int]] = None,
1122
+ num_tokens_to_remove: int = 0,
1123
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
1124
+ stride: int = 0,
1125
+ ) -> Tuple[List[int], List[int], List[int]]:
1126
+ """
1127
+ Truncates a sequence pair in-place following the strategy.
1128
+
1129
+ Args:
1130
+ ids (`List[int]`):
1131
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
1132
+ `convert_tokens_to_ids` methods.
1133
+ token_boxes (`List[List[int]]`):
1134
+ Bounding boxes of the first sequence.
1135
+ pair_ids (`List[int]`, *optional*):
1136
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
1137
+ and `convert_tokens_to_ids` methods.
1138
+ pair_token_boxes (`List[List[int]]`, *optional*):
1139
+ Bounding boxes of the second sequence.
1140
+ labels (`List[int]`, *optional*):
1141
+ Labels of the first sequence (for token classification tasks).
1142
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
1143
+ Number of tokens to remove using the truncation strategy.
1144
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
1145
+ The strategy to follow for truncation. Can be:
1146
+
1147
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1148
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
1149
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
1150
+ batch of pairs) is provided.
1151
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
1152
+ maximum acceptable input length for the model if that argument is not provided. This will only
1153
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1154
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
1155
+ maximum acceptable input length for the model if that argument is not provided. This will only
1156
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
1157
+ - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
1158
+ than the model maximum admissible input size).
1159
+ stride (`int`, *optional*, defaults to 0):
1160
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
1161
+ sequence returned. The value of this argument defines the number of additional tokens.
1162
+
1163
+ Returns:
1164
+ `Tuple`: The truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed by the lists of
1165
+ overflowing tokens, overflowing token boxes and overflowing labels. Note: the *longest_first* strategy returns an
1166
+ empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided.
1167
+ """
1168
+ if num_tokens_to_remove <= 0:
1169
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
1170
+
1171
+ if not isinstance(truncation_strategy, TruncationStrategy):
1172
+ truncation_strategy = TruncationStrategy(truncation_strategy)
1173
+
1174
+ overflowing_tokens = []
1175
+ overflowing_token_boxes = []
1176
+ overflowing_labels = []
1177
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
1178
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
1179
+ ):
1180
+ if len(ids) > num_tokens_to_remove:
1181
+ window_len = min(len(ids), stride + num_tokens_to_remove)
1182
+ overflowing_tokens = ids[-window_len:]
1183
+ overflowing_token_boxes = token_boxes[-window_len:]
1184
+ overflowing_labels = labels[-window_len:]
1185
+ ids = ids[:-num_tokens_to_remove]
1186
+ token_boxes = token_boxes[:-num_tokens_to_remove]
1187
+ labels = labels[:-num_tokens_to_remove]
1188
+ else:
1189
+ error_msg = (
1190
+ f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
1191
+ f"but the first sequence has a length {len(ids)}. "
1192
+ )
1193
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
1194
+ error_msg = (
1195
+ error_msg + "Please select another truncation strategy than "
1196
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
1197
+ )
1198
+ logger.error(error_msg)
1199
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
1200
+ logger.warning(
1201
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
1202
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
1203
+ "truncation strategy. So the returned list will always be empty even if some "
1204
+ "tokens have been removed."
1205
+ )
1206
+ for _ in range(num_tokens_to_remove):
1207
+ if pair_ids is None or len(ids) > len(pair_ids):
1208
+ ids = ids[:-1]
1209
+ token_boxes = token_boxes[:-1]
1210
+ labels = labels[:-1]
1211
+ else:
1212
+ pair_ids = pair_ids[:-1]
1213
+ pair_token_boxes = pair_token_boxes[:-1]
1214
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
1215
+ if len(pair_ids) > num_tokens_to_remove:
1216
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
1217
+ overflowing_tokens = pair_ids[-window_len:]
1218
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
1219
+ pair_ids = pair_ids[:-num_tokens_to_remove]
1220
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
1221
+ else:
1222
+ logger.error(
1223
+ f"We need to remove {num_tokens_to_remove} tokens to truncate the input "
1224
+ f"but the second sequence has a length {len(pair_ids)}. "
1225
+ f"Please select another truncation strategy than {truncation_strategy}, "
1226
+ "for instance 'longest_first' or 'only_first'."
1227
+ )
1228
+
1229
+ return (
1230
+ ids,
1231
+ token_boxes,
1232
+ pair_ids,
1233
+ pair_token_boxes,
1234
+ labels,
1235
+ overflowing_tokens,
1236
+ overflowing_token_boxes,
1237
+ overflowing_labels,
1238
+ )
1239
+
1240
+ def _pad(
1241
+ self,
1242
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
1243
+ max_length: Optional[int] = None,
1244
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1245
+ pad_to_multiple_of: Optional[int] = None,
1246
+ return_attention_mask: Optional[bool] = None,
1247
+ ) -> dict:
1248
+ """
1249
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
1250
+
1251
+ Args:
1252
+ encoded_inputs:
1253
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
1254
+ max_length: maximum length of the returned list and optionally padding length (see below).
1255
+ Will truncate by taking into account the special tokens.
1256
+ padding_strategy: PaddingStrategy to use for padding.
1257
+
1258
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
1259
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
1260
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
1261
+ The tokenizer padding sides are defined in self.padding_side:
1262
+
1263
+ - 'left': pads on the left of the sequences
1264
+ - 'right': pads on the right of the sequences
1265
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
1266
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
1267
+ `>= 7.5` (Volta).
1268
+ return_attention_mask:
1269
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
1270
+ """
1271
+ # Load from model defaults
1272
+ if return_attention_mask is None:
1273
+ return_attention_mask = "attention_mask" in self.model_input_names
1274
+
1275
+ required_input = encoded_inputs[self.model_input_names[0]]
1276
+
1277
+ if padding_strategy == PaddingStrategy.LONGEST:
1278
+ max_length = len(required_input)
1279
+
1280
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
1281
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
1282
+
1283
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
1284
+
1285
+ # Initialize attention mask if not present.
1286
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
1287
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
1288
+
1289
+ if needs_to_be_padded:
1290
+ difference = max_length - len(required_input)
1291
+ if self.padding_side == "right":
1292
+ if return_attention_mask:
1293
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
1294
+ if "token_type_ids" in encoded_inputs:
1295
+ encoded_inputs["token_type_ids"] = (
1296
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
1297
+ )
1298
+ if "bbox" in encoded_inputs:
1299
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
1300
+ if "labels" in encoded_inputs:
1301
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
1302
+ if "special_tokens_mask" in encoded_inputs:
1303
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
1304
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
1305
+ elif self.padding_side == "left":
1306
+ if return_attention_mask:
1307
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
1308
+ if "token_type_ids" in encoded_inputs:
1309
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
1310
+ "token_type_ids"
1311
+ ]
1312
+ if "bbox" in encoded_inputs:
1313
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
1314
+ if "labels" in encoded_inputs:
1315
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
1316
+ if "special_tokens_mask" in encoded_inputs:
1317
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
1318
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
1319
+ else:
1320
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
1321
+
1322
+ return encoded_inputs
1323
+
1324
+
1325
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
1326
+ class BasicTokenizer(object):
1327
+ """
1328
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
1329
+
1330
+ Args:
1331
+ do_lower_case (`bool`, *optional*, defaults to `True`):
1332
+ Whether or not to lowercase the input when tokenizing.
1333
+ never_split (`Iterable`, *optional*):
1334
+ Collection of tokens which will never be split during tokenization. Only has an effect when
1335
+ `do_basic_tokenize=True`
1336
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
1337
+ Whether or not to tokenize Chinese characters.
1338
+
1339
+ This should likely be deactivated for Japanese (see this
1340
+ [issue](https://github.com/huggingface/transformers/issues/328)).
1341
+ strip_accents (`bool`, *optional*):
1342
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
1343
+ value for `lowercase` (as in the original BERT).
1344
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
1345
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
1346
+ the full context of the words, such as contractions.
1347
+ """
1348
+
1349
+ def __init__(
1350
+ self,
1351
+ do_lower_case=True,
1352
+ never_split=None,
1353
+ tokenize_chinese_chars=True,
1354
+ strip_accents=None,
1355
+ do_split_on_punc=True,
1356
+ ):
1357
+ if never_split is None:
1358
+ never_split = []
1359
+ self.do_lower_case = do_lower_case
1360
+ self.never_split = set(never_split)
1361
+ self.tokenize_chinese_chars = tokenize_chinese_chars
1362
+ self.strip_accents = strip_accents
1363
+ self.do_split_on_punc = do_split_on_punc
1364
+
1365
+ def tokenize(self, text, never_split=None):
1366
+ """
1367
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
1368
+
1369
+ Args:
1370
+ never_split (`List[str]`, *optional*)
1371
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
1372
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
1373
+ """
1374
+ # union() returns a new set by concatenating the two sets.
1375
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
1376
+ text = self._clean_text(text)
1377
+
1378
+ # This was added on November 1st, 2018 for the multilingual and Chinese
1379
+ # models. This is also applied to the English models now, but it doesn't
1380
+ # matter since the English models were not trained on any Chinese data
1381
+ # and generally don't have any Chinese data in them (there are Chinese
1382
+ # characters in the vocabulary because Wikipedia does have some Chinese
1383
+ # words in the English Wikipedia).
1384
+ if self.tokenize_chinese_chars:
1385
+ text = self._tokenize_chinese_chars(text)
1386
+ # prevents treating the same character with different unicode codepoints as different characters
1387
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
1388
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
1389
+ split_tokens = []
1390
+ for token in orig_tokens:
1391
+ if token not in never_split:
1392
+ if self.do_lower_case:
1393
+ token = token.lower()
1394
+ if self.strip_accents is not False:
1395
+ token = self._run_strip_accents(token)
1396
+ elif self.strip_accents:
1397
+ token = self._run_strip_accents(token)
1398
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
1399
+
1400
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
1401
+ return output_tokens
1402
+
1403
+ def _run_strip_accents(self, text):
1404
+ """Strips accents from a piece of text."""
1405
+ text = unicodedata.normalize("NFD", text)
1406
+ output = []
1407
+ for char in text:
1408
+ cat = unicodedata.category(char)
1409
+ if cat == "Mn":
1410
+ continue
1411
+ output.append(char)
1412
+ return "".join(output)
1413
+
1414
+ def _run_split_on_punc(self, text, never_split=None):
1415
+ """Splits punctuation on a piece of text."""
1416
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
1417
+ return [text]
1418
+ chars = list(text)
1419
+ i = 0
1420
+ start_new_word = True
1421
+ output = []
1422
+ while i < len(chars):
1423
+ char = chars[i]
1424
+ if _is_punctuation(char):
1425
+ output.append([char])
1426
+ start_new_word = True
1427
+ else:
1428
+ if start_new_word:
1429
+ output.append([])
1430
+ start_new_word = False
1431
+ output[-1].append(char)
1432
+ i += 1
1433
+
1434
+ return ["".join(x) for x in output]
1435
+
1436
+ def _tokenize_chinese_chars(self, text):
1437
+ """Adds whitespace around any CJK character."""
1438
+ output = []
1439
+ for char in text:
1440
+ cp = ord(char)
1441
+ if self._is_chinese_char(cp):
1442
+ output.append(" ")
1443
+ output.append(char)
1444
+ output.append(" ")
1445
+ else:
1446
+ output.append(char)
1447
+ return "".join(output)
1448
+
1449
+ def _is_chinese_char(self, cp):
1450
+ """Checks whether CP is the codepoint of a CJK character."""
1451
+ # This defines a "chinese character" as anything in the CJK Unicode block:
1452
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
1453
+ #
1454
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
1455
+ # despite its name. The modern Korean Hangul alphabet is a different block,
1456
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
1457
+ # space-separated words, so they are not treated specially and handled
1458
+ # like all of the other languages.
1459
+ if (
1460
+ (cp >= 0x4E00 and cp <= 0x9FFF)
1461
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
1462
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
1463
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
1464
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
1465
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
1466
+ or (cp >= 0xF900 and cp <= 0xFAFF)
1467
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
1468
+ ): #
1469
+ return True
1470
+
1471
+ return False
1472
+
1473
+ def _clean_text(self, text):
1474
+ """Performs invalid character removal and whitespace cleanup on text."""
1475
+ output = []
1476
+ for char in text:
1477
+ cp = ord(char)
1478
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
1479
+ continue
1480
+ if _is_whitespace(char):
1481
+ output.append(" ")
1482
+ else:
1483
+ output.append(char)
1484
+ return "".join(output)
1485
+
1486
+
1487
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
1488
+ class WordpieceTokenizer(object):
1489
+ """Runs WordPiece tokenization."""
1490
+
1491
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
1492
+ self.vocab = vocab
1493
+ self.unk_token = unk_token
1494
+ self.max_input_chars_per_word = max_input_chars_per_word
1495
+
1496
+ def tokenize(self, text):
1497
+ """
1498
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
1499
+ tokenization using the given vocabulary.
1500
+
1501
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
1502
+
1503
+ Args:
1504
+ text: A single token or whitespace separated tokens. This should have
1505
+ already been passed through *BasicTokenizer*.
1506
+
1507
+ Returns:
1508
+ A list of wordpiece tokens.
1509
+ """
1510
+
1511
+ output_tokens = []
1512
+ for token in whitespace_tokenize(text):
1513
+ chars = list(token)
1514
+ if len(chars) > self.max_input_chars_per_word:
1515
+ output_tokens.append(self.unk_token)
1516
+ continue
1517
+
1518
+ is_bad = False
1519
+ start = 0
1520
+ sub_tokens = []
1521
+ while start < len(chars):
1522
+ end = len(chars)
1523
+ cur_substr = None
1524
+ while start < end:
1525
+ substr = "".join(chars[start:end])
1526
+ if start > 0:
1527
+ substr = "##" + substr
1528
+ if substr in self.vocab:
1529
+ cur_substr = substr
1530
+ break
1531
+ end -= 1
1532
+ if cur_substr is None:
1533
+ is_bad = True
1534
+ break
1535
+ sub_tokens.append(cur_substr)
1536
+ start = end
1537
+
1538
+ if is_bad:
1539
+ output_tokens.append(self.unk_token)
1540
+ else:
1541
+ output_tokens.extend(sub_tokens)
1542
+ return output_tokens
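For reference, a minimal usage sketch of the slow LayoutLMv2Tokenizer defined above (an illustration only, not part of the committed file). It assumes the public `microsoft/layoutlmv2-base-uncased` checkpoint is reachable, and the example words, boxes, and labels are hypothetical. It shows how word-level `boxes` are expanded into token-level `bbox`, and how, with `only_label_first_subword=True`, only the first wordpiece of each word keeps its label while the remaining pieces (and padding) receive `pad_token_label` (-100).

from transformers import LayoutLMv2Tokenizer

# Load the pretrained slow tokenizer (assumes the checkpoint name is available/downloadable).
tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")

# Hypothetical example inputs: words with 0-1000 normalized bounding boxes and integer word labels.
words = ["hello", "unaffable", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 180, 30], [190, 10, 240, 30]]
word_labels = [0, 1, 2]

encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=16)

# "unaffable" is split into several wordpieces; each piece repeats the word's box in `bbox`,
# but only the first piece keeps label 1 while the others (and the padding) are set to -100.
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))
print(encoding["bbox"])
print(encoding["labels"])

The fast tokenizer defined in the next file accepts the same call signature and additionally supports `return_offsets_mapping=True`, which the slow implementation above explicitly rejects.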
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py ADDED
@@ -0,0 +1,793 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Fast tokenization class for LayoutLMv2. It overrides 2 methods of the slow tokenizer class, namely _batch_encode_plus
17
+ and _encode_plus, in which the Rust tokenizer is used.
18
+ """
19
+
20
+ import json
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ from tokenizers import normalizers
24
+
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PaddingStrategy,
29
+ PreTokenizedInput,
30
+ TensorType,
31
+ TextInput,
32
+ TextInputPair,
33
+ TruncationStrategy,
34
+ )
35
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
36
+ from ...utils import add_end_docstrings, logging
37
+ from .tokenization_layoutlmv2 import (
38
+ LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING,
39
+ LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
40
+ LayoutLMv2Tokenizer,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
47
+
48
+
49
+ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
50
+ r"""
51
+ Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
52
+
53
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
54
+ refer to this superclass for more information regarding those methods.
55
+
56
+ Args:
57
+ vocab_file (`str`):
58
+ File containing the vocabulary.
59
+ do_lower_case (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to lowercase the input when tokenizing.
61
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
62
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
63
+ token instead.
64
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
65
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
66
+ sequence classification or for a text and a question for question answering. It is also used as the last
67
+ token of a sequence built with special tokens.
68
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
69
+ The token used for padding, for example when batching sequences of different lengths.
70
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
71
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
72
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
73
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
74
+ The token used for masking values. This is the token used when training this model with masked language
75
+ modeling. This is the token which the model will try to predict.
76
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
77
+ The bounding box to use for the special [CLS] token.
78
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
79
+ The bounding box to use for the special [SEP] token.
80
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
81
+ The bounding box to use for the special [PAD] token.
82
+ pad_token_label (`int`, *optional*, defaults to -100):
83
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
84
+ CrossEntropyLoss.
85
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to only label the first subword, in case word labels are provided.
87
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
88
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
89
+ issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original LayoutLMv2).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+ slow_tokenizer_class = LayoutLMv2Tokenizer
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_file=None,
101
+ tokenizer_file=None,
102
+ do_lower_case=True,
103
+ unk_token="[UNK]",
104
+ sep_token="[SEP]",
105
+ pad_token="[PAD]",
106
+ cls_token="[CLS]",
107
+ mask_token="[MASK]",
108
+ cls_token_box=[0, 0, 0, 0],
109
+ sep_token_box=[1000, 1000, 1000, 1000],
110
+ pad_token_box=[0, 0, 0, 0],
111
+ pad_token_label=-100,
112
+ only_label_first_subword=True,
113
+ tokenize_chinese_chars=True,
114
+ strip_accents=None,
115
+ **kwargs,
116
+ ):
117
+ super().__init__(
118
+ vocab_file,
119
+ tokenizer_file=tokenizer_file,
120
+ do_lower_case=do_lower_case,
121
+ unk_token=unk_token,
122
+ sep_token=sep_token,
123
+ pad_token=pad_token,
124
+ cls_token=cls_token,
125
+ mask_token=mask_token,
126
+ cls_token_box=cls_token_box,
127
+ sep_token_box=sep_token_box,
128
+ pad_token_box=pad_token_box,
129
+ pad_token_label=pad_token_label,
130
+ only_label_first_subword=only_label_first_subword,
131
+ tokenize_chinese_chars=tokenize_chinese_chars,
132
+ strip_accents=strip_accents,
133
+ **kwargs,
134
+ )
135
+
136
+ pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
137
+ if (
138
+ pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
139
+ or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
140
+ ):
141
+ pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
142
+ pre_tok_state["lowercase"] = do_lower_case
143
+ pre_tok_state["strip_accents"] = strip_accents
144
+ self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
145
+
146
+ self.do_lower_case = do_lower_case
147
+
148
+ # additional properties
149
+ self.cls_token_box = cls_token_box
150
+ self.sep_token_box = sep_token_box
151
+ self.pad_token_box = pad_token_box
152
+ self.pad_token_label = pad_token_label
153
+ self.only_label_first_subword = only_label_first_subword
154
+
155
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
156
+ def __call__(
157
+ self,
158
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
159
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
160
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
161
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
162
+ add_special_tokens: bool = True,
163
+ padding: Union[bool, str, PaddingStrategy] = False,
164
+ truncation: Union[bool, str, TruncationStrategy] = None,
165
+ max_length: Optional[int] = None,
166
+ stride: int = 0,
167
+ pad_to_multiple_of: Optional[int] = None,
168
+ return_tensors: Optional[Union[str, TensorType]] = None,
169
+ return_token_type_ids: Optional[bool] = None,
170
+ return_attention_mask: Optional[bool] = None,
171
+ return_overflowing_tokens: bool = False,
172
+ return_special_tokens_mask: bool = False,
173
+ return_offsets_mapping: bool = False,
174
+ return_length: bool = False,
175
+ verbose: bool = True,
176
+ **kwargs,
177
+ ) -> BatchEncoding:
178
+ """
179
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
180
+ sequences with word-level normalized bounding boxes and optional labels.
181
+
182
+ Args:
183
+ text (`str`, `List[str]`, `List[List[str]]`):
184
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
185
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
186
+ words).
187
+ text_pair (`List[str]`, `List[List[str]]`):
188
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
189
+ (pretokenized string).
190
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
191
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
192
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
193
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
194
+ """
195
+
196
+ # Input type checking for clearer error
197
+ def _is_valid_text_input(t):
198
+ if isinstance(t, str):
199
+ # Strings are fine
200
+ return True
201
+ elif isinstance(t, (list, tuple)):
202
+ # List are fine as long as they are...
203
+ if len(t) == 0:
204
+ # ... empty
205
+ return True
206
+ elif isinstance(t[0], str):
207
+ # ... list of strings
208
+ return True
209
+ elif isinstance(t[0], (list, tuple)):
210
+ # ... list with an empty list or with a list of strings
211
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
212
+ else:
213
+ return False
214
+ else:
215
+ return False
216
+
217
+ if text_pair is not None:
218
+ # in case text + text_pair are provided, text = questions, text_pair = words
219
+ if not _is_valid_text_input(text):
220
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
221
+ if not isinstance(text_pair, (list, tuple)):
222
+ raise ValueError(
223
+ "Words must be of type `List[str]` (single pretokenized example), "
224
+ "or `List[List[str]]` (batch of pretokenized examples)."
225
+ )
226
+ else:
227
+ # in case only text is provided => must be words
228
+ if not isinstance(text, (list, tuple)):
229
+ raise ValueError(
230
+ "Words must be of type `List[str]` (single pretokenized example), "
231
+ "or `List[List[str]]` (batch of pretokenized examples)."
232
+ )
233
+
234
+ if text_pair is not None:
235
+ is_batched = isinstance(text, (list, tuple))
236
+ else:
237
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
238
+
239
+ words = text if text_pair is None else text_pair
240
+ if boxes is None:
241
+ raise ValueError("You must provide corresponding bounding boxes")
242
+ if is_batched:
243
+ if len(words) != len(boxes):
244
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
245
+ for words_example, boxes_example in zip(words, boxes):
246
+ if len(words_example) != len(boxes_example):
247
+ raise ValueError("You must provide as many words as there are bounding boxes")
248
+ else:
249
+ if len(words) != len(boxes):
250
+ raise ValueError("You must provide as many words as there are bounding boxes")
251
+
252
+ if is_batched:
253
+ if text_pair is not None and len(text) != len(text_pair):
254
+ raise ValueError(
255
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
256
+ f" {len(text_pair)}."
257
+ )
258
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
259
+ is_pair = bool(text_pair is not None)
260
+ return self.batch_encode_plus(
261
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
262
+ is_pair=is_pair,
263
+ boxes=boxes,
264
+ word_labels=word_labels,
265
+ add_special_tokens=add_special_tokens,
266
+ padding=padding,
267
+ truncation=truncation,
268
+ max_length=max_length,
269
+ stride=stride,
270
+ pad_to_multiple_of=pad_to_multiple_of,
271
+ return_tensors=return_tensors,
272
+ return_token_type_ids=return_token_type_ids,
273
+ return_attention_mask=return_attention_mask,
274
+ return_overflowing_tokens=return_overflowing_tokens,
275
+ return_special_tokens_mask=return_special_tokens_mask,
276
+ return_offsets_mapping=return_offsets_mapping,
277
+ return_length=return_length,
278
+ verbose=verbose,
279
+ **kwargs,
280
+ )
281
+ else:
282
+ return self.encode_plus(
283
+ text=text,
284
+ text_pair=text_pair,
285
+ boxes=boxes,
286
+ word_labels=word_labels,
287
+ add_special_tokens=add_special_tokens,
288
+ padding=padding,
289
+ truncation=truncation,
290
+ max_length=max_length,
291
+ stride=stride,
292
+ pad_to_multiple_of=pad_to_multiple_of,
293
+ return_tensors=return_tensors,
294
+ return_token_type_ids=return_token_type_ids,
295
+ return_attention_mask=return_attention_mask,
296
+ return_overflowing_tokens=return_overflowing_tokens,
297
+ return_special_tokens_mask=return_special_tokens_mask,
298
+ return_offsets_mapping=return_offsets_mapping,
299
+ return_length=return_length,
300
+ verbose=verbose,
301
+ **kwargs,
302
+ )
303
+
304
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
305
+ def batch_encode_plus(
306
+ self,
307
+ batch_text_or_text_pairs: Union[
308
+ List[TextInput],
309
+ List[TextInputPair],
310
+ List[PreTokenizedInput],
311
+ ],
312
+ is_pair: bool = None,
313
+ boxes: Optional[List[List[List[int]]]] = None,
314
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
315
+ add_special_tokens: bool = True,
316
+ padding: Union[bool, str, PaddingStrategy] = False,
317
+ truncation: Union[bool, str, TruncationStrategy] = None,
318
+ max_length: Optional[int] = None,
319
+ stride: int = 0,
320
+ pad_to_multiple_of: Optional[int] = None,
321
+ return_tensors: Optional[Union[str, TensorType]] = None,
322
+ return_token_type_ids: Optional[bool] = None,
323
+ return_attention_mask: Optional[bool] = None,
324
+ return_overflowing_tokens: bool = False,
325
+ return_special_tokens_mask: bool = False,
326
+ return_offsets_mapping: bool = False,
327
+ return_length: bool = False,
328
+ verbose: bool = True,
329
+ **kwargs,
330
+ ) -> BatchEncoding:
331
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
332
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
333
+ padding=padding,
334
+ truncation=truncation,
335
+ max_length=max_length,
336
+ pad_to_multiple_of=pad_to_multiple_of,
337
+ verbose=verbose,
338
+ **kwargs,
339
+ )
340
+
341
+ return self._batch_encode_plus(
342
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
343
+ is_pair=is_pair,
344
+ boxes=boxes,
345
+ word_labels=word_labels,
346
+ add_special_tokens=add_special_tokens,
347
+ padding_strategy=padding_strategy,
348
+ truncation_strategy=truncation_strategy,
349
+ max_length=max_length,
350
+ stride=stride,
351
+ pad_to_multiple_of=pad_to_multiple_of,
352
+ return_tensors=return_tensors,
353
+ return_token_type_ids=return_token_type_ids,
354
+ return_attention_mask=return_attention_mask,
355
+ return_overflowing_tokens=return_overflowing_tokens,
356
+ return_special_tokens_mask=return_special_tokens_mask,
357
+ return_offsets_mapping=return_offsets_mapping,
358
+ return_length=return_length,
359
+ verbose=verbose,
360
+ **kwargs,
361
+ )
362
+
363
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
364
+ batched_input = [(text, pair)] if pair else [text]
365
+ encodings = self._tokenizer.encode_batch(
366
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
367
+ )
368
+
369
+ return encodings[0].tokens
370
+
371
+ @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
372
+ def encode_plus(
373
+ self,
374
+ text: Union[TextInput, PreTokenizedInput],
375
+ text_pair: Optional[PreTokenizedInput] = None,
376
+ boxes: Optional[List[List[int]]] = None,
377
+ word_labels: Optional[List[int]] = None,
378
+ add_special_tokens: bool = True,
379
+ padding: Union[bool, str, PaddingStrategy] = False,
380
+ truncation: Union[bool, str, TruncationStrategy] = None,
381
+ max_length: Optional[int] = None,
382
+ stride: int = 0,
383
+ pad_to_multiple_of: Optional[int] = None,
384
+ return_tensors: Optional[Union[str, TensorType]] = None,
385
+ return_token_type_ids: Optional[bool] = None,
386
+ return_attention_mask: Optional[bool] = None,
387
+ return_overflowing_tokens: bool = False,
388
+ return_special_tokens_mask: bool = False,
389
+ return_offsets_mapping: bool = False,
390
+ return_length: bool = False,
391
+ verbose: bool = True,
392
+ **kwargs,
393
+ ) -> BatchEncoding:
394
+ """
395
+ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
396
+ `__call__` should be used instead.
397
+
398
+ Args:
399
+ text (`str`, `List[str]`, `List[List[str]]`):
400
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
401
+ text_pair (`List[str]` or `List[int]`, *optional*):
402
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
403
+ list of list of strings (words of a batch of examples).
404
+ """
405
+
406
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
407
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
408
+ padding=padding,
409
+ truncation=truncation,
410
+ max_length=max_length,
411
+ pad_to_multiple_of=pad_to_multiple_of,
412
+ verbose=verbose,
413
+ **kwargs,
414
+ )
415
+
416
+ return self._encode_plus(
417
+ text=text,
418
+ boxes=boxes,
419
+ text_pair=text_pair,
420
+ word_labels=word_labels,
421
+ add_special_tokens=add_special_tokens,
422
+ padding_strategy=padding_strategy,
423
+ truncation_strategy=truncation_strategy,
424
+ max_length=max_length,
425
+ stride=stride,
426
+ pad_to_multiple_of=pad_to_multiple_of,
427
+ return_tensors=return_tensors,
428
+ return_token_type_ids=return_token_type_ids,
429
+ return_attention_mask=return_attention_mask,
430
+ return_overflowing_tokens=return_overflowing_tokens,
431
+ return_special_tokens_mask=return_special_tokens_mask,
432
+ return_offsets_mapping=return_offsets_mapping,
433
+ return_length=return_length,
434
+ verbose=verbose,
435
+ **kwargs,
436
+ )
437
+
438
+ def _batch_encode_plus(
439
+ self,
440
+ batch_text_or_text_pairs: Union[
441
+ List[TextInput],
442
+ List[TextInputPair],
443
+ List[PreTokenizedInput],
444
+ ],
445
+ is_pair: bool = None,
446
+ boxes: Optional[List[List[List[int]]]] = None,
447
+ word_labels: Optional[List[List[int]]] = None,
448
+ add_special_tokens: bool = True,
449
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
450
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
451
+ max_length: Optional[int] = None,
452
+ stride: int = 0,
453
+ pad_to_multiple_of: Optional[int] = None,
454
+ return_tensors: Optional[str] = None,
455
+ return_token_type_ids: Optional[bool] = None,
456
+ return_attention_mask: Optional[bool] = None,
457
+ return_overflowing_tokens: bool = False,
458
+ return_special_tokens_mask: bool = False,
459
+ return_offsets_mapping: bool = False,
460
+ return_length: bool = False,
461
+ verbose: bool = True,
462
+ ) -> BatchEncoding:
463
+ if not isinstance(batch_text_or_text_pairs, list):
464
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
465
+
466
+ # Set the truncation and padding strategy and restore the initial configuration
467
+ self.set_truncation_and_padding(
468
+ padding_strategy=padding_strategy,
469
+ truncation_strategy=truncation_strategy,
470
+ max_length=max_length,
471
+ stride=stride,
472
+ pad_to_multiple_of=pad_to_multiple_of,
473
+ )
474
+
475
+ if is_pair:
476
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
477
+
478
+ encodings = self._tokenizer.encode_batch(
479
+ batch_text_or_text_pairs,
480
+ add_special_tokens=add_special_tokens,
481
+ is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs
482
+ )
483
+
484
+ # Convert encoding to dict
485
+ # `Tokens` has type: Tuple[
486
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
487
+ # List[EncodingFast]
488
+ # ]
489
+ # with nested dimensions corresponding to batch, overflows, sequence length
490
+ tokens_and_encodings = [
491
+ self._convert_encoding(
492
+ encoding=encoding,
493
+ return_token_type_ids=return_token_type_ids,
494
+ return_attention_mask=return_attention_mask,
495
+ return_overflowing_tokens=return_overflowing_tokens,
496
+ return_special_tokens_mask=return_special_tokens_mask,
497
+ return_offsets_mapping=True
498
+ if word_labels is not None
499
+ else return_offsets_mapping, # we use offsets to create the labels
500
+ return_length=return_length,
501
+ verbose=verbose,
502
+ )
503
+ for encoding in encodings
504
+ ]
505
+
506
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
507
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
508
+ # (we say ~ because the number of overflow varies with the example in the batch)
509
+ #
510
+ # To match each overflowing sample with the original sample in the batch
511
+ # we add an overflow_to_sample_mapping array (see below)
512
+ sanitized_tokens = {}
513
+ for key in tokens_and_encodings[0][0].keys():
514
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
515
+ sanitized_tokens[key] = stack
516
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
517
+
518
+ # If returning overflowing tokens, we need to return a mapping
519
+ # from the batch idx to the original sample
520
+ if return_overflowing_tokens:
521
+ overflow_to_sample_mapping = []
522
+ for i, (toks, _) in enumerate(tokens_and_encodings):
523
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
524
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
525
+
526
+ for input_ids in sanitized_tokens["input_ids"]:
527
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
528
+
529
+ # create the token boxes
530
+ token_boxes = []
531
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
532
+ if return_overflowing_tokens:
533
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
534
+ else:
535
+ original_index = batch_index
536
+ token_boxes_example = []
537
+ for id, sequence_id, word_id in zip(
538
+ sanitized_tokens["input_ids"][batch_index],
539
+ sanitized_encodings[batch_index].sequence_ids,
540
+ sanitized_encodings[batch_index].word_ids,
541
+ ):
542
+ if word_id is not None:
543
+ if is_pair and sequence_id == 0:
544
+ token_boxes_example.append(self.pad_token_box)
545
+ else:
546
+ token_boxes_example.append(boxes[original_index][word_id])
547
+ else:
548
+ if id == self.cls_token_id:
549
+ token_boxes_example.append(self.cls_token_box)
550
+ elif id == self.sep_token_id:
551
+ token_boxes_example.append(self.sep_token_box)
552
+ elif id == self.pad_token_id:
553
+ token_boxes_example.append(self.pad_token_box)
554
+ else:
555
+ raise ValueError("Id not recognized")
556
+ token_boxes.append(token_boxes_example)
557
+
558
+ sanitized_tokens["bbox"] = token_boxes
559
+
560
+ # optionally, create the labels
561
+ if word_labels is not None:
562
+ labels = []
563
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
564
+ if return_overflowing_tokens:
565
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
566
+ else:
567
+ original_index = batch_index
568
+ labels_example = []
569
+ for id, offset, word_id in zip(
570
+ sanitized_tokens["input_ids"][batch_index],
571
+ sanitized_tokens["offset_mapping"][batch_index],
572
+ sanitized_encodings[batch_index].word_ids,
573
+ ):
574
+ if word_id is not None:
575
+ if self.only_label_first_subword:
576
+ if offset[0] == 0:
577
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
578
+ labels_example.append(word_labels[original_index][word_id])
579
+ else:
580
+ labels_example.append(self.pad_token_label)
581
+ else:
582
+ labels_example.append(word_labels[original_index][word_id])
583
+ else:
584
+ labels_example.append(self.pad_token_label)
585
+ labels.append(labels_example)
586
+
587
+ sanitized_tokens["labels"] = labels
588
+ # finally, remove offsets if the user didn't want them
589
+ if not return_offsets_mapping:
590
+ del sanitized_tokens["offset_mapping"]
591
+
592
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
593
+
594
+ def _encode_plus(
595
+ self,
596
+ text: Union[TextInput, PreTokenizedInput],
597
+ text_pair: Optional[PreTokenizedInput] = None,
598
+ boxes: Optional[List[List[int]]] = None,
599
+ word_labels: Optional[List[int]] = None,
600
+ add_special_tokens: bool = True,
601
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
602
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
603
+ max_length: Optional[int] = None,
604
+ stride: int = 0,
605
+ pad_to_multiple_of: Optional[int] = None,
606
+ return_tensors: Optional[bool] = None,
607
+ return_token_type_ids: Optional[bool] = None,
608
+ return_attention_mask: Optional[bool] = None,
609
+ return_overflowing_tokens: bool = False,
610
+ return_special_tokens_mask: bool = False,
611
+ return_offsets_mapping: bool = False,
612
+ return_length: bool = False,
613
+ verbose: bool = True,
614
+ **kwargs,
615
+ ) -> BatchEncoding:
616
+ # make it a batched input
617
+ # 2 options:
618
+ # 1) only text, in case text must be a list of str
619
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
620
+ batched_input = [(text, text_pair)] if text_pair else [text]
621
+ batched_boxes = [boxes]
622
+ batched_word_labels = [word_labels] if word_labels is not None else None
623
+ batched_output = self._batch_encode_plus(
624
+ batched_input,
625
+ is_pair=bool(text_pair is not None),
626
+ boxes=batched_boxes,
627
+ word_labels=batched_word_labels,
628
+ add_special_tokens=add_special_tokens,
629
+ padding_strategy=padding_strategy,
630
+ truncation_strategy=truncation_strategy,
631
+ max_length=max_length,
632
+ stride=stride,
633
+ pad_to_multiple_of=pad_to_multiple_of,
634
+ return_tensors=return_tensors,
635
+ return_token_type_ids=return_token_type_ids,
636
+ return_attention_mask=return_attention_mask,
637
+ return_overflowing_tokens=return_overflowing_tokens,
638
+ return_special_tokens_mask=return_special_tokens_mask,
639
+ return_offsets_mapping=return_offsets_mapping,
640
+ return_length=return_length,
641
+ verbose=verbose,
642
+ **kwargs,
643
+ )
644
+
645
+ # Return tensor is None, then we can remove the leading batch axis
646
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
647
+ if return_tensors is None and not return_overflowing_tokens:
648
+ batched_output = BatchEncoding(
649
+ {
650
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
651
+ for key, value in batched_output.items()
652
+ },
653
+ batched_output.encodings,
654
+ )
655
+
656
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
657
+
658
+ return batched_output
659
+
660
+ def _pad(
661
+ self,
662
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
663
+ max_length: Optional[int] = None,
664
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
665
+ pad_to_multiple_of: Optional[int] = None,
666
+ return_attention_mask: Optional[bool] = None,
667
+ ) -> dict:
668
+ """
669
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
670
+
671
+ Args:
672
+ encoded_inputs:
673
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
674
+ max_length: maximum length of the returned list and optionally padding length (see below).
675
+ Will truncate by taking into account the special tokens.
676
+ padding_strategy: PaddingStrategy to use for padding.
677
+
678
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
679
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
680
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
681
+ The tokenizer padding sides are defined in self.padding_side:
682
+
683
+ - 'left': pads on the left of the sequences
684
+ - 'right': pads on the right of the sequences
685
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
686
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
687
+ `>= 7.5` (Volta).
688
+ return_attention_mask:
689
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
690
+ """
691
+ # Load from model defaults
692
+ if return_attention_mask is None:
693
+ return_attention_mask = "attention_mask" in self.model_input_names
694
+
695
+ required_input = encoded_inputs[self.model_input_names[0]]
696
+
697
+ if padding_strategy == PaddingStrategy.LONGEST:
698
+ max_length = len(required_input)
699
+
700
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
701
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
702
+
703
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
704
+
705
+ # Initialize attention mask if not present.
706
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
707
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
708
+
709
+ if needs_to_be_padded:
710
+ difference = max_length - len(required_input)
711
+ if self.padding_side == "right":
712
+ if return_attention_mask:
713
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
714
+ if "token_type_ids" in encoded_inputs:
715
+ encoded_inputs["token_type_ids"] = (
716
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
717
+ )
718
+ if "bbox" in encoded_inputs:
719
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
720
+ if "labels" in encoded_inputs:
721
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
722
+ if "special_tokens_mask" in encoded_inputs:
723
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
724
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
725
+ elif self.padding_side == "left":
726
+ if return_attention_mask:
727
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
728
+ if "token_type_ids" in encoded_inputs:
729
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
730
+ "token_type_ids"
731
+ ]
732
+ if "bbox" in encoded_inputs:
733
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
734
+ if "labels" in encoded_inputs:
735
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
736
+ if "special_tokens_mask" in encoded_inputs:
737
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
738
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
739
+ else:
740
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
741
+
742
+ return encoded_inputs
743
+
744
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
745
+ """
746
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
747
+ adding special tokens. A BERT sequence has the following format:
748
+
749
+ - single sequence: `[CLS] X [SEP]`
750
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
751
+
752
+ Args:
753
+ token_ids_0 (`List[int]`):
754
+ List of IDs to which the special tokens will be added.
755
+ token_ids_1 (`List[int]`, *optional*):
756
+ Optional second list of IDs for sequence pairs.
757
+
758
+ Returns:
759
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
760
+ """
761
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
762
+
763
+ if token_ids_1:
764
+ output += token_ids_1 + [self.sep_token_id]
765
+
766
+ return output
767
+
768
+ def create_token_type_ids_from_sequences(
769
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
770
+ ) -> List[int]:
771
+ """
772
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
773
+ pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second
774
+ sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
775
+
776
+ Args:
777
+ token_ids_0 (`List[int]`):
778
+ List of IDs.
779
+ token_ids_1 (`List[int]`, *optional*):
780
+ Optional second list of IDs for sequence pairs.
781
+
782
+ Returns:
783
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
784
+ """
785
+ sep = [self.sep_token_id]
786
+ cls = [self.cls_token_id]
787
+ if token_ids_1 is None:
788
+ return len(cls + token_ids_0 + sep) * [0]
789
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
790
+
791
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
792
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
793
+ return tuple(files)
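
For readers skimming the diff above, here is a minimal usage sketch of the word/box interface that `LayoutLMv2TokenizerFast.__call__`, `_batch_encode_plus` and `_pad` implement. It is not part of the commit; the checkpoint name and the example words, boxes and labels are illustrative assumptions.

from transformers import LayoutLMv2TokenizerFast

# Assumed checkpoint name; any LayoutLMv2 checkpoint with a fast tokenizer would do.
tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")

words = ["hello", "world"]
boxes = [[48, 84, 73, 128], [74, 84, 98, 128]]  # one box per word, normalized to 0-1000
word_labels = [0, 1]                             # optional token-classification labels

encoding = tokenizer(
    words,                      # pretokenized words (single, non-batched example)
    boxes=boxes,
    word_labels=word_labels,
    padding="max_length",
    truncation=True,
    max_length=32,
    return_tensors="pt",
)
# The output contains "bbox" (token-level boxes built in _batch_encode_plus) and,
# because word_labels were passed, "labels" aligned to the first subword of each word.
print(sorted(encoding.keys()))
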
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.24 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/configuration_layoutlmv3.cpython-310.pyc ADDED
Binary file (11 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/feature_extraction_layoutlmv3.cpython-310.pyc ADDED
Binary file (1.06 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/image_processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (15.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_layoutlmv3.cpython-310.pyc ADDED
Binary file (42.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/modeling_tf_layoutlmv3.cpython-310.pyc ADDED
Binary file (51.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/processing_layoutlmv3.cpython-310.pyc ADDED
Binary file (7.24 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/__pycache__/tokenization_layoutlmv3_fast.cpython-310.pyc ADDED
Binary file (22.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py ADDED
@@ -0,0 +1,837 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Fast tokenization class for LayoutLMv3. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
17
+ and _encode_plus, in which the Rust tokenizer is used.
18
+ """
19
+
20
+ import json
21
+ from typing import Dict, List, Optional, Tuple, Union
22
+
23
+ from tokenizers import pre_tokenizers, processors
24
+
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ EncodedInput,
28
+ PaddingStrategy,
29
+ PreTokenizedInput,
30
+ TensorType,
31
+ TextInput,
32
+ TextInputPair,
33
+ TruncationStrategy,
34
+ )
35
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
36
+ from ...utils import add_end_docstrings, logging
37
+ from .tokenization_layoutlmv3 import (
38
+ LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING,
39
+ LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
40
+ LayoutLMv3Tokenizer,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
47
+
48
+
49
+ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
50
+ r"""
51
+ Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.
52
+
53
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
54
+ refer to this superclass for more information regarding those methods.
55
+
56
+ Args:
57
+ vocab_file (`str`):
58
+ Path to the vocabulary file.
59
+ merges_file (`str`):
60
+ Path to the merges file.
61
+ errors (`str`, *optional*, defaults to `"replace"`):
62
+ Paradigm to follow when decoding bytes to UTF-8. See
63
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
64
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
65
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
66
+
67
+ <Tip>
68
+
69
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
70
+ sequence. The token used is the `cls_token`.
71
+
72
+ </Tip>
73
+
74
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
75
+ The end of sequence token.
76
+
77
+ <Tip>
78
+
79
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
80
+ The token used is the `sep_token`.
81
+
82
+ </Tip>
83
+
84
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
85
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
86
+ sequence classification or for a text and a question for question answering. It is also used as the last
87
+ token of a sequence built with special tokens.
88
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
89
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
90
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
91
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
92
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
93
+ token instead.
94
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
95
+ The token used for padding, for example when batching sequences of different lengths.
96
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
97
+ The token used for masking values. This is the token used when training this model with masked language
98
+ modeling. This is the token which the model will try to predict.
99
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
100
+ Whether or not to add an initial space to the input. This allows to treat the leading word just as any
101
+ other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
102
+ trim_offsets (`bool`, *optional*, defaults to `True`):
103
+ Whether the post processing step should trim offsets to avoid including whitespaces.
104
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
105
+ The bounding box to use for the special [CLS] token.
106
+ sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
107
+ The bounding box to use for the special [SEP] token.
108
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
109
+ The bounding box to use for the special [PAD] token.
110
+ pad_token_label (`int`, *optional*, defaults to -100):
111
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
112
+ CrossEntropyLoss.
113
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
114
+ Whether or not to only label the first subword, in case word labels are provided.
115
+ """
116
+
117
+ vocab_files_names = VOCAB_FILES_NAMES
118
+ model_input_names = ["input_ids", "attention_mask"]
119
+ slow_tokenizer_class = LayoutLMv3Tokenizer
120
+
121
+ def __init__(
122
+ self,
123
+ vocab_file=None,
124
+ merges_file=None,
125
+ tokenizer_file=None,
126
+ errors="replace",
127
+ bos_token="<s>",
128
+ eos_token="</s>",
129
+ sep_token="</s>",
130
+ cls_token="<s>",
131
+ unk_token="<unk>",
132
+ pad_token="<pad>",
133
+ mask_token="<mask>",
134
+ add_prefix_space=True,
135
+ trim_offsets=True,
136
+ cls_token_box=[0, 0, 0, 0],
137
+ sep_token_box=[0, 0, 0, 0],
138
+ pad_token_box=[0, 0, 0, 0],
139
+ pad_token_label=-100,
140
+ only_label_first_subword=True,
141
+ **kwargs,
142
+ ):
143
+ super().__init__(
144
+ vocab_file,
145
+ merges_file,
146
+ tokenizer_file=tokenizer_file,
147
+ errors=errors,
148
+ bos_token=bos_token,
149
+ eos_token=eos_token,
150
+ sep_token=sep_token,
151
+ cls_token=cls_token,
152
+ unk_token=unk_token,
153
+ pad_token=pad_token,
154
+ mask_token=mask_token,
155
+ add_prefix_space=add_prefix_space,
156
+ trim_offsets=trim_offsets,
157
+ cls_token_box=cls_token_box,
158
+ sep_token_box=sep_token_box,
159
+ pad_token_box=pad_token_box,
160
+ pad_token_label=pad_token_label,
161
+ only_label_first_subword=only_label_first_subword,
162
+ **kwargs,
163
+ )
164
+
165
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
166
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
167
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
168
+ pre_tok_state["add_prefix_space"] = add_prefix_space
169
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
170
+
171
+ self.add_prefix_space = add_prefix_space
172
+
173
+ tokenizer_component = "post_processor"
174
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
175
+ if tokenizer_component_instance:
176
+ state = json.loads(tokenizer_component_instance.__getstate__())
177
+
178
+ # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
179
+ if "sep" in state:
180
+ state["sep"] = tuple(state["sep"])
181
+ if "cls" in state:
182
+ state["cls"] = tuple(state["cls"])
183
+
184
+ changes_to_apply = False
185
+
186
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
187
+ state["add_prefix_space"] = add_prefix_space
188
+ changes_to_apply = True
189
+
190
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
191
+ state["trim_offsets"] = trim_offsets
192
+ changes_to_apply = True
193
+
194
+ if changes_to_apply:
195
+ component_class = getattr(processors, state.pop("type"))
196
+ new_value = component_class(**state)
197
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
198
+
199
+ # additional properties
200
+ self.cls_token_box = cls_token_box
201
+ self.sep_token_box = sep_token_box
202
+ self.pad_token_box = pad_token_box
203
+ self.pad_token_label = pad_token_label
204
+ self.only_label_first_subword = only_label_first_subword
205
+
206
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
207
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.__call__
208
+ def __call__(
209
+ self,
210
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
211
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
212
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
213
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
214
+ add_special_tokens: bool = True,
215
+ padding: Union[bool, str, PaddingStrategy] = False,
216
+ truncation: Union[bool, str, TruncationStrategy] = None,
217
+ max_length: Optional[int] = None,
218
+ stride: int = 0,
219
+ pad_to_multiple_of: Optional[int] = None,
220
+ return_tensors: Optional[Union[str, TensorType]] = None,
221
+ return_token_type_ids: Optional[bool] = None,
222
+ return_attention_mask: Optional[bool] = None,
223
+ return_overflowing_tokens: bool = False,
224
+ return_special_tokens_mask: bool = False,
225
+ return_offsets_mapping: bool = False,
226
+ return_length: bool = False,
227
+ verbose: bool = True,
228
+ **kwargs,
229
+ ) -> BatchEncoding:
230
+ """
231
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
232
+ sequences with word-level normalized bounding boxes and optional labels.
233
+
234
+ Args:
235
+ text (`str`, `List[str]`, `List[List[str]]`):
236
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
237
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
238
+ words).
239
+ text_pair (`List[str]`, `List[List[str]]`):
240
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
241
+ (pretokenized string).
242
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
243
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
244
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
245
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
246
+ """
247
+
248
+ # Input type checking for clearer error
249
+ def _is_valid_text_input(t):
250
+ if isinstance(t, str):
251
+ # Strings are fine
252
+ return True
253
+ elif isinstance(t, (list, tuple)):
254
+ # List are fine as long as they are...
255
+ if len(t) == 0:
256
+ # ... empty
257
+ return True
258
+ elif isinstance(t[0], str):
259
+ # ... list of strings
260
+ return True
261
+ elif isinstance(t[0], (list, tuple)):
262
+ # ... list with an empty list or with a list of strings
263
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
264
+ else:
265
+ return False
266
+ else:
267
+ return False
268
+
269
+ if text_pair is not None:
270
+ # in case text + text_pair are provided, text = questions, text_pair = words
271
+ if not _is_valid_text_input(text):
272
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
273
+ if not isinstance(text_pair, (list, tuple)):
274
+ raise ValueError(
275
+ "Words must be of type `List[str]` (single pretokenized example), "
276
+ "or `List[List[str]]` (batch of pretokenized examples)."
277
+ )
278
+ else:
279
+ # in case only text is provided => must be words
280
+ if not isinstance(text, (list, tuple)):
281
+ raise ValueError(
282
+ "Words must be of type `List[str]` (single pretokenized example), "
283
+ "or `List[List[str]]` (batch of pretokenized examples)."
284
+ )
285
+
286
+ if text_pair is not None:
287
+ is_batched = isinstance(text, (list, tuple))
288
+ else:
289
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
290
+
291
+ words = text if text_pair is None else text_pair
292
+ if boxes is None:
293
+ raise ValueError("You must provide corresponding bounding boxes")
294
+ if is_batched:
295
+ if len(words) != len(boxes):
296
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
297
+ for words_example, boxes_example in zip(words, boxes):
298
+ if len(words_example) != len(boxes_example):
299
+ raise ValueError("You must provide as many words as there are bounding boxes")
300
+ else:
301
+ if len(words) != len(boxes):
302
+ raise ValueError("You must provide as many words as there are bounding boxes")
303
+
304
+ if is_batched:
305
+ if text_pair is not None and len(text) != len(text_pair):
306
+ raise ValueError(
307
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
308
+ f" {len(text_pair)}."
309
+ )
310
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
311
+ is_pair = bool(text_pair is not None)
312
+ return self.batch_encode_plus(
313
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
314
+ is_pair=is_pair,
315
+ boxes=boxes,
316
+ word_labels=word_labels,
317
+ add_special_tokens=add_special_tokens,
318
+ padding=padding,
319
+ truncation=truncation,
320
+ max_length=max_length,
321
+ stride=stride,
322
+ pad_to_multiple_of=pad_to_multiple_of,
323
+ return_tensors=return_tensors,
324
+ return_token_type_ids=return_token_type_ids,
325
+ return_attention_mask=return_attention_mask,
326
+ return_overflowing_tokens=return_overflowing_tokens,
327
+ return_special_tokens_mask=return_special_tokens_mask,
328
+ return_offsets_mapping=return_offsets_mapping,
329
+ return_length=return_length,
330
+ verbose=verbose,
331
+ **kwargs,
332
+ )
333
+ else:
334
+ return self.encode_plus(
335
+ text=text,
336
+ text_pair=text_pair,
337
+ boxes=boxes,
338
+ word_labels=word_labels,
339
+ add_special_tokens=add_special_tokens,
340
+ padding=padding,
341
+ truncation=truncation,
342
+ max_length=max_length,
343
+ stride=stride,
344
+ pad_to_multiple_of=pad_to_multiple_of,
345
+ return_tensors=return_tensors,
346
+ return_token_type_ids=return_token_type_ids,
347
+ return_attention_mask=return_attention_mask,
348
+ return_overflowing_tokens=return_overflowing_tokens,
349
+ return_special_tokens_mask=return_special_tokens_mask,
350
+ return_offsets_mapping=return_offsets_mapping,
351
+ return_length=return_length,
352
+ verbose=verbose,
353
+ **kwargs,
354
+ )
355
+
356
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
357
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.batch_encode_plus
358
+ def batch_encode_plus(
359
+ self,
360
+ batch_text_or_text_pairs: Union[
361
+ List[TextInput],
362
+ List[TextInputPair],
363
+ List[PreTokenizedInput],
364
+ ],
365
+ is_pair: bool = None,
366
+ boxes: Optional[List[List[List[int]]]] = None,
367
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
368
+ add_special_tokens: bool = True,
369
+ padding: Union[bool, str, PaddingStrategy] = False,
370
+ truncation: Union[bool, str, TruncationStrategy] = None,
371
+ max_length: Optional[int] = None,
372
+ stride: int = 0,
373
+ pad_to_multiple_of: Optional[int] = None,
374
+ return_tensors: Optional[Union[str, TensorType]] = None,
375
+ return_token_type_ids: Optional[bool] = None,
376
+ return_attention_mask: Optional[bool] = None,
377
+ return_overflowing_tokens: bool = False,
378
+ return_special_tokens_mask: bool = False,
379
+ return_offsets_mapping: bool = False,
380
+ return_length: bool = False,
381
+ verbose: bool = True,
382
+ **kwargs,
383
+ ) -> BatchEncoding:
384
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
385
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
386
+ padding=padding,
387
+ truncation=truncation,
388
+ max_length=max_length,
389
+ pad_to_multiple_of=pad_to_multiple_of,
390
+ verbose=verbose,
391
+ **kwargs,
392
+ )
393
+
394
+ return self._batch_encode_plus(
395
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
396
+ is_pair=is_pair,
397
+ boxes=boxes,
398
+ word_labels=word_labels,
399
+ add_special_tokens=add_special_tokens,
400
+ padding_strategy=padding_strategy,
401
+ truncation_strategy=truncation_strategy,
402
+ max_length=max_length,
403
+ stride=stride,
404
+ pad_to_multiple_of=pad_to_multiple_of,
405
+ return_tensors=return_tensors,
406
+ return_token_type_ids=return_token_type_ids,
407
+ return_attention_mask=return_attention_mask,
408
+ return_overflowing_tokens=return_overflowing_tokens,
409
+ return_special_tokens_mask=return_special_tokens_mask,
410
+ return_offsets_mapping=return_offsets_mapping,
411
+ return_length=return_length,
412
+ verbose=verbose,
413
+ **kwargs,
414
+ )
415
+
416
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.tokenize
417
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
418
+ batched_input = [(text, pair)] if pair else [text]
419
+ encodings = self._tokenizer.encode_batch(
420
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
421
+ )
422
+
423
+ return encodings[0].tokens
424
+
425
+ @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
426
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.encode_plus
427
+ def encode_plus(
428
+ self,
429
+ text: Union[TextInput, PreTokenizedInput],
430
+ text_pair: Optional[PreTokenizedInput] = None,
431
+ boxes: Optional[List[List[int]]] = None,
432
+ word_labels: Optional[List[int]] = None,
433
+ add_special_tokens: bool = True,
434
+ padding: Union[bool, str, PaddingStrategy] = False,
435
+ truncation: Union[bool, str, TruncationStrategy] = None,
436
+ max_length: Optional[int] = None,
437
+ stride: int = 0,
438
+ pad_to_multiple_of: Optional[int] = None,
439
+ return_tensors: Optional[Union[str, TensorType]] = None,
440
+ return_token_type_ids: Optional[bool] = None,
441
+ return_attention_mask: Optional[bool] = None,
442
+ return_overflowing_tokens: bool = False,
443
+ return_special_tokens_mask: bool = False,
444
+ return_offsets_mapping: bool = False,
445
+ return_length: bool = False,
446
+ verbose: bool = True,
447
+ **kwargs,
448
+ ) -> BatchEncoding:
449
+ """
450
+ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
451
+ `__call__` should be used instead.
452
+
453
+ Args:
454
+ text (`str`, `List[str]`, `List[List[str]]`):
455
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
456
+ text_pair (`List[str]` or `List[int]`, *optional*):
457
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
458
+ list of list of strings (words of a batch of examples).
459
+ """
460
+
461
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
462
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
463
+ padding=padding,
464
+ truncation=truncation,
465
+ max_length=max_length,
466
+ pad_to_multiple_of=pad_to_multiple_of,
467
+ verbose=verbose,
468
+ **kwargs,
469
+ )
470
+
471
+ return self._encode_plus(
472
+ text=text,
473
+ boxes=boxes,
474
+ text_pair=text_pair,
475
+ word_labels=word_labels,
476
+ add_special_tokens=add_special_tokens,
477
+ padding_strategy=padding_strategy,
478
+ truncation_strategy=truncation_strategy,
479
+ max_length=max_length,
480
+ stride=stride,
481
+ pad_to_multiple_of=pad_to_multiple_of,
482
+ return_tensors=return_tensors,
483
+ return_token_type_ids=return_token_type_ids,
484
+ return_attention_mask=return_attention_mask,
485
+ return_overflowing_tokens=return_overflowing_tokens,
486
+ return_special_tokens_mask=return_special_tokens_mask,
487
+ return_offsets_mapping=return_offsets_mapping,
488
+ return_length=return_length,
489
+ verbose=verbose,
490
+ **kwargs,
491
+ )
492
+
493
+ def _batch_encode_plus(
494
+ self,
495
+ batch_text_or_text_pairs: Union[
496
+ List[TextInput],
497
+ List[TextInputPair],
498
+ List[PreTokenizedInput],
499
+ ],
500
+ is_pair: bool = None,
501
+ boxes: Optional[List[List[List[int]]]] = None,
502
+ word_labels: Optional[List[List[int]]] = None,
503
+ add_special_tokens: bool = True,
504
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
505
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
506
+ max_length: Optional[int] = None,
507
+ stride: int = 0,
508
+ pad_to_multiple_of: Optional[int] = None,
509
+ return_tensors: Optional[str] = None,
510
+ return_token_type_ids: Optional[bool] = None,
511
+ return_attention_mask: Optional[bool] = None,
512
+ return_overflowing_tokens: bool = False,
513
+ return_special_tokens_mask: bool = False,
514
+ return_offsets_mapping: bool = False,
515
+ return_length: bool = False,
516
+ verbose: bool = True,
517
+ ) -> BatchEncoding:
518
+ if not isinstance(batch_text_or_text_pairs, list):
519
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
520
+
521
+ # Set the truncation and padding strategy and restore the initial configuration
522
+ self.set_truncation_and_padding(
523
+ padding_strategy=padding_strategy,
524
+ truncation_strategy=truncation_strategy,
525
+ max_length=max_length,
526
+ stride=stride,
527
+ pad_to_multiple_of=pad_to_multiple_of,
528
+ )
529
+
530
+ if is_pair:
531
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
532
+
533
+ encodings = self._tokenizer.encode_batch(
534
+ batch_text_or_text_pairs,
535
+ add_special_tokens=add_special_tokens,
536
+ is_pretokenized=True, # we set this to True as LayoutLMv3 always expects pretokenized inputs
537
+ )
538
+
539
+ # Convert encoding to dict
540
+ # `Tokens` has type: Tuple[
541
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
542
+ # List[EncodingFast]
543
+ # ]
544
+ # with nested dimensions corresponding to batch, overflows, sequence length
545
+ tokens_and_encodings = [
546
+ self._convert_encoding(
547
+ encoding=encoding,
548
+ return_token_type_ids=return_token_type_ids,
549
+ return_attention_mask=return_attention_mask,
550
+ return_overflowing_tokens=return_overflowing_tokens,
551
+ return_special_tokens_mask=return_special_tokens_mask,
552
+ return_offsets_mapping=True
553
+ if word_labels is not None
554
+ else return_offsets_mapping, # we use offsets to create the labels
555
+ return_length=return_length,
556
+ verbose=verbose,
557
+ )
558
+ for encoding in encodings
559
+ ]
560
+
561
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
562
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
563
+ # (we say ~ because the number of overflow varies with the example in the batch)
564
+ #
565
+ # To match each overflowing sample with the original sample in the batch
566
+ # we add an overflow_to_sample_mapping array (see below)
567
+ sanitized_tokens = {}
568
+ for key in tokens_and_encodings[0][0].keys():
569
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
570
+ sanitized_tokens[key] = stack
571
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
572
+
573
+ # If returning overflowing tokens, we need to return a mapping
574
+ # from the batch idx to the original sample
575
+ if return_overflowing_tokens:
576
+ overflow_to_sample_mapping = []
577
+ for i, (toks, _) in enumerate(tokens_and_encodings):
578
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
579
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
580
+
581
+ for input_ids in sanitized_tokens["input_ids"]:
582
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
583
+
584
+ # create the token boxes
585
+ token_boxes = []
586
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
587
+ if return_overflowing_tokens:
588
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
589
+ else:
590
+ original_index = batch_index
591
+ token_boxes_example = []
592
+ for id, sequence_id, word_id in zip(
593
+ sanitized_tokens["input_ids"][batch_index],
594
+ sanitized_encodings[batch_index].sequence_ids,
595
+ sanitized_encodings[batch_index].word_ids,
596
+ ):
597
+ if word_id is not None:
598
+ if is_pair and sequence_id == 0:
599
+ token_boxes_example.append(self.pad_token_box)
600
+ else:
601
+ token_boxes_example.append(boxes[original_index][word_id])
602
+ else:
603
+ if id == self.cls_token_id:
604
+ token_boxes_example.append(self.cls_token_box)
605
+ elif id == self.sep_token_id:
606
+ token_boxes_example.append(self.sep_token_box)
607
+ elif id == self.pad_token_id:
608
+ token_boxes_example.append(self.pad_token_box)
609
+ else:
610
+ raise ValueError("Id not recognized")
611
+ token_boxes.append(token_boxes_example)
612
+
613
+ sanitized_tokens["bbox"] = token_boxes
614
+
615
+ # optionally, create the labels
616
+ if word_labels is not None:
617
+ labels = []
618
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
619
+ if return_overflowing_tokens:
620
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
621
+ else:
622
+ original_index = batch_index
623
+ labels_example = []
624
+ previous_token_empty = False
625
+ for id, offset, word_id in zip(
626
+ sanitized_tokens["input_ids"][batch_index],
627
+ sanitized_tokens["offset_mapping"][batch_index],
628
+ sanitized_encodings[batch_index].word_ids,
629
+ ):
630
+ if word_id is not None:
631
+ if self.only_label_first_subword:
632
+ if offset[0] == 0 and not previous_token_empty:
633
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
634
+ labels_example.append(word_labels[original_index][word_id])
635
+ else:
636
+ labels_example.append(self.pad_token_label)
637
+ if offset == (0, 0):
638
+ previous_token_empty = True
639
+ else:
640
+ previous_token_empty = False
641
+ else:
642
+ labels_example.append(word_labels[original_index][word_id])
643
+ else:
644
+ labels_example.append(self.pad_token_label)
645
+ labels.append(labels_example)
646
+
647
+ sanitized_tokens["labels"] = labels
648
+ # finally, remove offsets if the user didn't want them
649
+ if not return_offsets_mapping:
650
+ del sanitized_tokens["offset_mapping"]
651
+
652
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
653
+
654
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._encode_plus
655
+ def _encode_plus(
656
+ self,
657
+ text: Union[TextInput, PreTokenizedInput],
658
+ text_pair: Optional[PreTokenizedInput] = None,
659
+ boxes: Optional[List[List[int]]] = None,
660
+ word_labels: Optional[List[int]] = None,
661
+ add_special_tokens: bool = True,
662
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
663
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
664
+ max_length: Optional[int] = None,
665
+ stride: int = 0,
666
+ pad_to_multiple_of: Optional[int] = None,
667
+ return_tensors: Optional[bool] = None,
668
+ return_token_type_ids: Optional[bool] = None,
669
+ return_attention_mask: Optional[bool] = None,
670
+ return_overflowing_tokens: bool = False,
671
+ return_special_tokens_mask: bool = False,
672
+ return_offsets_mapping: bool = False,
673
+ return_length: bool = False,
674
+ verbose: bool = True,
675
+ **kwargs,
676
+ ) -> BatchEncoding:
677
+ # make it a batched input
678
+ # 2 options:
679
+ # 1) only text, in case text must be a list of str
680
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
681
+ batched_input = [(text, text_pair)] if text_pair else [text]
682
+ batched_boxes = [boxes]
683
+ batched_word_labels = [word_labels] if word_labels is not None else None
684
+ batched_output = self._batch_encode_plus(
685
+ batched_input,
686
+ is_pair=bool(text_pair is not None),
687
+ boxes=batched_boxes,
688
+ word_labels=batched_word_labels,
689
+ add_special_tokens=add_special_tokens,
690
+ padding_strategy=padding_strategy,
691
+ truncation_strategy=truncation_strategy,
692
+ max_length=max_length,
693
+ stride=stride,
694
+ pad_to_multiple_of=pad_to_multiple_of,
695
+ return_tensors=return_tensors,
696
+ return_token_type_ids=return_token_type_ids,
697
+ return_attention_mask=return_attention_mask,
698
+ return_overflowing_tokens=return_overflowing_tokens,
699
+ return_special_tokens_mask=return_special_tokens_mask,
700
+ return_offsets_mapping=return_offsets_mapping,
701
+ return_length=return_length,
702
+ verbose=verbose,
703
+ **kwargs,
704
+ )
705
+
706
+ # Return tensor is None, then we can remove the leading batch axis
707
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
708
+ if return_tensors is None and not return_overflowing_tokens:
709
+ batched_output = BatchEncoding(
710
+ {
711
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
712
+ for key, value in batched_output.items()
713
+ },
714
+ batched_output.encodings,
715
+ )
716
+
717
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
718
+
719
+ return batched_output
720
+
721
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._pad
722
+ def _pad(
723
+ self,
724
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
725
+ max_length: Optional[int] = None,
726
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
727
+ pad_to_multiple_of: Optional[int] = None,
728
+ return_attention_mask: Optional[bool] = None,
729
+ ) -> dict:
730
+ """
731
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
732
+
733
+ Args:
734
+ encoded_inputs:
735
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
736
+ max_length: maximum length of the returned list and optionally padding length (see below).
737
+ Will truncate by taking into account the special tokens.
738
+ padding_strategy: PaddingStrategy to use for padding.
739
+
740
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
741
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
742
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
743
+ The tokenizer padding sides are defined in self.padding_side:
744
+
745
+ - 'left': pads on the left of the sequences
746
+ - 'right': pads on the right of the sequences
747
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
748
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
749
+ `>= 7.5` (Volta).
750
+ return_attention_mask:
751
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
752
+ """
753
+ # Load from model defaults
754
+ if return_attention_mask is None:
755
+ return_attention_mask = "attention_mask" in self.model_input_names
756
+
757
+ required_input = encoded_inputs[self.model_input_names[0]]
758
+
759
+ if padding_strategy == PaddingStrategy.LONGEST:
760
+ max_length = len(required_input)
761
+
762
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
763
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
764
+
765
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
766
+
767
+ # Initialize attention mask if not present.
768
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
769
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
770
+
771
+ if needs_to_be_padded:
772
+ difference = max_length - len(required_input)
773
+ if self.padding_side == "right":
774
+ if return_attention_mask:
775
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
776
+ if "token_type_ids" in encoded_inputs:
777
+ encoded_inputs["token_type_ids"] = (
778
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
779
+ )
780
+ if "bbox" in encoded_inputs:
781
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
782
+ if "labels" in encoded_inputs:
783
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
784
+ if "special_tokens_mask" in encoded_inputs:
785
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
786
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
787
+ elif self.padding_side == "left":
788
+ if return_attention_mask:
789
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
790
+ if "token_type_ids" in encoded_inputs:
791
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
792
+ "token_type_ids"
793
+ ]
794
+ if "bbox" in encoded_inputs:
795
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
796
+ if "labels" in encoded_inputs:
797
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
798
+ if "special_tokens_mask" in encoded_inputs:
799
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
800
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
801
+ else:
802
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
803
+
804
+ return encoded_inputs
805
+
806
+ # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.save_vocabulary
807
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
808
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
809
+ return tuple(files)
810
+
811
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
812
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
813
+ if token_ids_1 is None:
814
+ return output
815
+
816
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
817
+
818
+ def create_token_type_ids_from_sequences(
819
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
820
+ ) -> List[int]:
821
+ """
822
+ Args:
823
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not:
824
+ make use of token type ids, therefore a list of zeros is returned.
825
+ token_ids_0 (`List[int]`):
826
+ List of IDs.
827
+ token_ids_1 (`List[int]`, *optional*):
828
+ Optional second list of IDs for sequence pairs.
829
+ Returns:
830
+ `List[int]`: List of zeros.
831
+ """
832
+ sep = [self.sep_token_id]
833
+ cls = [self.cls_token_id]
834
+
835
+ if token_ids_1 is None:
836
+ return len(cls + token_ids_0 + sep) * [0]
837
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
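The `_pad` override above keeps the extra layout streams (`bbox`, `labels`, `special_tokens_mask`) aligned with `input_ids` while padding on either side. Below is a minimal standalone sketch of that behaviour, not the tokenizer itself; the pad values (`0`, `[0, 0, 0, 0]`, `-100`) are assumed placeholders, whereas the real ones come from `pad_token_id`, `pad_token_box` and `pad_token_label` on the tokenizer instance.

```python
from typing import Dict

# Assumed placeholder pad values; the tokenizer instance supplies the real ones.
PAD_TOKEN_ID = 0
PAD_TOKEN_BOX = [0, 0, 0, 0]
PAD_TOKEN_LABEL = -100


def pad_encoded(encoded: Dict[str, list], max_length: int, padding_side: str = "right") -> Dict[str, list]:
    """Pad input_ids together with attention_mask, bbox and labels."""
    difference = max_length - len(encoded["input_ids"])
    if difference <= 0:
        return encoded
    if padding_side == "right":
        encoded["input_ids"] += [PAD_TOKEN_ID] * difference
        encoded["attention_mask"] += [0] * difference
        encoded["bbox"] += [PAD_TOKEN_BOX] * difference
        encoded["labels"] += [PAD_TOKEN_LABEL] * difference
    elif padding_side == "left":
        encoded["input_ids"] = [PAD_TOKEN_ID] * difference + encoded["input_ids"]
        encoded["attention_mask"] = [0] * difference + encoded["attention_mask"]
        encoded["bbox"] = [PAD_TOKEN_BOX] * difference + encoded["bbox"]
        encoded["labels"] = [PAD_TOKEN_LABEL] * difference + encoded["labels"]
    else:
        raise ValueError(f"Invalid padding side: {padding_side}")
    return encoded


example = {
    "input_ids": [101, 7592, 102],
    "attention_mask": [1, 1, 1],
    "bbox": [[0, 0, 0, 0], [10, 10, 20, 20], [0, 0, 0, 0]],
    "labels": [-100, 3, -100],
}
print(pad_encoded(example, max_length=5))
```

Right padding appends the filler values and left padding prepends them; in both cases the attention mask receives zeros for the padded positions so the model ignores them.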
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
17
+
18
+
19
+ _import_structure = {}
20
+
21
+ try:
22
+ if not is_sentencepiece_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"]
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"]
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ try:
40
+ if not is_sentencepiece_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ from .tokenization_mbart50 import MBart50Tokenizer
46
+
47
+ try:
48
+ if not is_tokenizers_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ from .tokenization_mbart50_fast import MBart50TokenizerFast
54
+
55
+ else:
56
+ import sys
57
+
58
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
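The `__init__.py` above only registers `MBart50Tokenizer` when `sentencepiece` is importable and `MBart50TokenizerFast` when `tokenizers` is, and resolves both lazily through `_LazyModule`. A usage sketch, assuming both optional dependencies are installed and the `facebook/mbart-large-50` checkpoint can be downloaded:

```python
# Both imports go through the lazy module; the concrete tokenization modules
# are only loaded on first attribute access, and only if their optional
# dependency is available in the environment.
from transformers import MBart50Tokenizer, MBart50TokenizerFast

slow = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50")      # needs sentencepiece
fast = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")  # needs tokenizers
print(type(slow).__name__, type(fast).__name__)
```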
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (927 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/__pycache__/tokenization_mbart50.cpython-310.pyc ADDED
Binary file (14.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50.py ADDED
@@ -0,0 +1,354 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import Any, Dict, List, Optional, Tuple
19
+
20
+ import sentencepiece as spm
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ SPIECE_UNDERLINE = "▁"
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
31
+
32
+
33
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip
34
+
35
+
36
+ class MBart50Tokenizer(PreTrainedTokenizer):
37
+ """
38
+ Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
39
+
40
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
41
+ this superclass for more information regarding those methods.
42
+
43
+ Args:
44
+ vocab_file (`str`):
45
+ Path to the vocabulary file.
46
+ src_lang (`str`, *optional*):
47
+ A string representing the source language.
48
+ tgt_lang (`str`, *optional*):
49
+ A string representing the target language.
50
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
51
+ The end of sequence token.
52
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
53
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
54
+ sequence classification or for a text and a question for question answering. It is also used as the last
55
+ token of a sequence built with special tokens.
56
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
57
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
58
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
59
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
60
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
61
+ token instead.
62
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
63
+ The token used for padding, for example when batching sequences of different lengths.
64
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
65
+ The token used for masking values. This is the token used when training this model with masked language
66
+ modeling. This is the token which the model will try to predict.
67
+ sp_model_kwargs (`dict`, *optional*):
68
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
69
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
70
+ to set:
71
+
72
+ - `enable_sampling`: Enable subword regularization.
73
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
74
+
75
+ - `nbest_size = {0,1}`: No sampling is performed.
76
+ - `nbest_size > 1`: samples from the nbest_size results.
77
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
78
+ using forward-filtering-and-backward-sampling algorithm.
79
+
80
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
81
+ BPE-dropout.
82
+
83
+ Examples:
84
+
85
+ ```python
86
+ >>> from transformers import MBart50Tokenizer
87
+
88
+ >>> tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
89
+ >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
90
+ >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
91
+ >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
92
+ >>> # model(**model_inputs) should work
93
+ ```"""
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+ model_input_names = ["input_ids", "attention_mask"]
97
+
98
+ prefix_tokens: List[int] = []
99
+ suffix_tokens: List[int] = []
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_file,
104
+ src_lang=None,
105
+ tgt_lang=None,
106
+ eos_token="</s>",
107
+ sep_token="</s>",
108
+ cls_token="<s>",
109
+ unk_token="<unk>",
110
+ pad_token="<pad>",
111
+ mask_token="<mask>",
112
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
113
+ **kwargs,
114
+ ) -> None:
115
+ # Mask token behaves like a normal word, i.e. it includes the space before it
116
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
117
+
118
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
119
+
120
+ kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
121
+ kwargs["additional_special_tokens"] += [
122
+ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
123
+ ]
124
+
125
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
126
+ self.sp_model.Load(str(vocab_file))
127
+ self.vocab_file = vocab_file
128
+
129
+ # Original fairseq vocab and spm vocab must be "aligned":
130
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
131
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
132
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
133
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
134
+
135
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
136
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
137
+
138
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
139
+ self.fairseq_offset = 1
140
+
141
+ self.sp_model_size = len(self.sp_model)
142
+ self.lang_code_to_id = {
143
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
144
+ }
145
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
146
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
147
+
148
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
149
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
150
+
151
+ super().__init__(
152
+ src_lang=src_lang,
153
+ tgt_lang=tgt_lang,
154
+ eos_token=eos_token,
155
+ unk_token=unk_token,
156
+ sep_token=sep_token,
157
+ cls_token=cls_token,
158
+ pad_token=pad_token,
159
+ mask_token=mask_token,
160
+ sp_model_kwargs=self.sp_model_kwargs,
161
+ **kwargs,
162
+ )
163
+
164
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
165
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
166
+ self.tgt_lang = tgt_lang
167
+ self.set_src_lang_special_tokens(self._src_lang)
168
+
169
+ @property
170
+ def vocab_size(self) -> int:
171
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
172
+
173
+ @property
174
+ def src_lang(self) -> str:
175
+ return self._src_lang
176
+
177
+ @src_lang.setter
178
+ def src_lang(self, new_src_lang: str) -> None:
179
+ self._src_lang = new_src_lang
180
+ self.set_src_lang_special_tokens(self._src_lang)
181
+
182
+ def __getstate__(self) -> Dict:
183
+ state = self.__dict__.copy()
184
+ state["sp_model"] = None
185
+ return state
186
+
187
+ def __setstate__(self, d: Dict) -> None:
188
+ self.__dict__ = d
189
+
190
+ # for backward compatibility
191
+ if not hasattr(self, "sp_model_kwargs"):
192
+ self.sp_model_kwargs = {}
193
+
194
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
195
+ self.sp_model.Load(self.vocab_file)
196
+
197
+ def get_vocab(self) -> Dict:
198
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
199
+ vocab.update(self.added_tokens_encoder)
200
+ return vocab
201
+
202
+ def _tokenize(self, text: str) -> List[str]:
203
+ return self.sp_model.encode(text, out_type=str)
204
+
205
+ def _convert_token_to_id(self, token: str) -> int:
206
+ """Converts a token (str) in an id using the vocab."""
207
+ if token in self.fairseq_tokens_to_ids:
208
+ return self.fairseq_tokens_to_ids[token]
209
+ spm_id = self.sp_model.PieceToId(token)
210
+
211
+ # Need to return unknown token if the SP model returned 0
212
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
213
+
214
+ def _convert_id_to_token(self, index: int) -> str:
215
+ """Converts an index (integer) in a token (str) using the vocab."""
216
+ if index in self.fairseq_ids_to_tokens:
217
+ return self.fairseq_ids_to_tokens[index]
218
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
219
+
220
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
221
+ def convert_tokens_to_string(self, tokens):
222
+ """Converts a sequence of tokens (string) in a single string."""
223
+ current_sub_tokens = []
224
+ out_string = ""
225
+ prev_is_special = False
226
+ for token in tokens:
227
+ # make sure that special tokens are not decoded using sentencepiece model
228
+ if token in self.all_special_tokens:
229
+ if not prev_is_special:
230
+ out_string += " "
231
+ out_string += self.sp_model.decode(current_sub_tokens) + token
232
+ prev_is_special = True
233
+ current_sub_tokens = []
234
+ else:
235
+ current_sub_tokens.append(token)
236
+ prev_is_special = False
237
+ out_string += self.sp_model.decode(current_sub_tokens)
238
+ return out_string.strip()
239
+
240
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
241
+ if not os.path.isdir(save_directory):
242
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
243
+ return
244
+ out_vocab_file = os.path.join(
245
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
246
+ )
247
+
248
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
249
+ copyfile(self.vocab_file, out_vocab_file)
250
+ elif not os.path.isfile(self.vocab_file):
251
+ with open(out_vocab_file, "wb") as fi:
252
+ content_spiece_model = self.sp_model.serialized_model_proto()
253
+ fi.write(content_spiece_model)
254
+
255
+ return (out_vocab_file,)
256
+
257
+ def get_special_tokens_mask(
258
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
259
+ ) -> List[int]:
260
+ """
261
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
262
+ special tokens using the tokenizer `prepare_for_model` method.
263
+
264
+ Args:
265
+ token_ids_0 (`List[int]`):
266
+ List of IDs.
267
+ token_ids_1 (`List[int]`, *optional*):
268
+ Optional second list of IDs for sequence pairs.
269
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
270
+ Whether or not the token list is already formatted with special tokens for the model.
271
+
272
+ Returns:
273
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
274
+ """
275
+
276
+ if already_has_special_tokens:
277
+ return super().get_special_tokens_mask(
278
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
279
+ )
280
+
281
+ prefix_ones = [1] * len(self.prefix_tokens)
282
+ suffix_ones = [1] * len(self.suffix_tokens)
283
+ if token_ids_1 is None:
284
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
285
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
286
+
287
+ def build_inputs_with_special_tokens(
288
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
289
+ ) -> List[int]:
290
+ """
291
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
292
+ adding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:
293
+
294
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
295
+ - `labels`: (for decoder) `[tgt_lang_code] X [eos]`
296
+
297
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
298
+ separator.
299
+
300
+ Args:
301
+ token_ids_0 (`List[int]`):
302
+ List of IDs to which the special tokens will be added.
303
+ token_ids_1 (`List[int]`, *optional*):
304
+ Optional second list of IDs for sequence pairs.
305
+
306
+ Returns:
307
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
308
+ """
309
+ if token_ids_1 is None:
310
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
311
+ # We don't expect to process pairs, but leave the pair logic for API consistency
312
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
313
+
314
+ def _build_translation_inputs(
315
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
316
+ ):
317
+ """Used by translation pipeline, to prepare inputs for the generate function"""
318
+ if src_lang is None or tgt_lang is None:
319
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
320
+ self.src_lang = src_lang
321
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
322
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
323
+ inputs["forced_bos_token_id"] = tgt_lang_id
324
+ return inputs
325
+
326
+ def prepare_seq2seq_batch(
327
+ self,
328
+ src_texts: List[str],
329
+ src_lang: str = "en_XX",
330
+ tgt_texts: Optional[List[str]] = None,
331
+ tgt_lang: str = "ro_RO",
332
+ **kwargs,
333
+ ) -> BatchEncoding:
334
+ self.src_lang = src_lang
335
+ self.tgt_lang = tgt_lang
336
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
337
+
338
+ def _switch_to_input_mode(self):
339
+ return self.set_src_lang_special_tokens(self.src_lang)
340
+
341
+ def _switch_to_target_mode(self):
342
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
343
+
344
+ def set_src_lang_special_tokens(self, src_lang: str) -> None:
345
+ """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
346
+ self.cur_lang_code_id = self.lang_code_to_id[src_lang]
347
+ self.prefix_tokens = [self.cur_lang_code_id]
348
+ self.suffix_tokens = [self.eos_token_id]
349
+
350
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
351
+ """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
352
+ self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
353
+ self.prefix_tokens = [self.cur_lang_code_id]
354
+ self.suffix_tokens = [self.eos_token_id]
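The comments above describe a one-position offset between the fairseq vocabulary and the raw SentencePiece ids, and the `build_inputs_with_special_tokens` docstring gives the `[src_lang_code] X [eos]` encoder format. A sketch exercising both, assuming the `facebook/mbart-large-50` checkpoint is reachable; the exact ids depend on that checkpoint's vocabulary:

```python
from transformers import MBart50Tokenizer

tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")

# Encoder inputs follow [src_lang_code] X [eos]: the source language code is
# prepended, a single eos token is appended, and BOS is never used.
ids = tok("UN Chief Says There Is No Military Solution in Syria")["input_ids"]
assert ids[0] == tok.convert_tokens_to_ids("en_XX")
assert ids[-1] == tok.eos_token_id

# Regular pieces are shifted by fairseq_offset (= 1) relative to the raw
# SentencePiece id, keeping ids 0-3 reserved for <s>, <pad>, </s>, <unk>.
piece = tok.tokenize("Syria")[0]
assert tok.convert_tokens_to_ids(piece) == tok.sp_model.PieceToId(piece) + tok.fairseq_offset
```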
llmeval-env/lib/python3.10/site-packages/transformers/models/mbart50/tokenization_mbart50_fast.py ADDED
@@ -0,0 +1,259 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from shutil import copyfile
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import processors
21
+
22
+ from ...tokenization_utils import AddedToken, BatchEncoding
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_mbart50 import MBart50Tokenizer
29
+ else:
30
+ MBart50Tokenizer = None
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
36
+
37
+
38
+ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip
39
+
40
+
41
+ class MBart50TokenizerFast(PreTrainedTokenizerFast):
42
+ """
43
+ Construct a "fast" MBART tokenizer for mBART-50 (backed by HuggingFace's *tokenizers* library). Based on
44
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
45
+
46
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
47
+ refer to this superclass for more information regarding those methods.
48
+
49
+ Args:
50
+ vocab_file (`str`):
51
+ Path to the vocabulary file.
52
+ src_lang (`str`, *optional*):
53
+ A string representing the source language.
54
+ tgt_lang (`str`, *optional*):
55
+ A string representing the target language.
56
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
57
+ The end of sequence token.
58
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
59
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
60
+ sequence classification or for a text and a question for question answering. It is also used as the last
61
+ token of a sequence built with special tokens.
62
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
63
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
64
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
65
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
66
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
67
+ token instead.
68
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
69
+ The token used for padding, for example when batching sequences of different lengths.
70
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
71
+ The token used for masking values. This is the token used when training this model with masked language
72
+ modeling. This is the token which the model will try to predict.
73
+
74
+ Examples:
75
+
76
+ ```python
77
+ >>> from transformers import MBart50TokenizerFast
78
+
79
+ >>> tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO")
80
+ >>> src_text = " UN Chief Says There Is No Military Solution in Syria"
81
+ >>> tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"
82
+ >>> model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
83
+ >>> # model(**model_inputs) should work
84
+ ```"""
85
+
86
+ vocab_files_names = VOCAB_FILES_NAMES
87
+ model_input_names = ["input_ids", "attention_mask"]
88
+ slow_tokenizer_class = MBart50Tokenizer
89
+
90
+ prefix_tokens: List[int] = []
91
+ suffix_tokens: List[int] = []
92
+
93
+ def __init__(
94
+ self,
95
+ vocab_file=None,
96
+ src_lang=None,
97
+ tgt_lang=None,
98
+ tokenizer_file=None,
99
+ eos_token="</s>",
100
+ sep_token="</s>",
101
+ cls_token="<s>",
102
+ unk_token="<unk>",
103
+ pad_token="<pad>",
104
+ mask_token="<mask>",
105
+ **kwargs,
106
+ ):
107
+ # Mask token behaves like a normal word, i.e. it includes the space before it
108
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
109
+
110
+ kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
111
+ kwargs["additional_special_tokens"] += [
112
+ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
113
+ ]
114
+
115
+ super().__init__(
116
+ vocab_file,
117
+ src_lang=src_lang,
118
+ tgt_lang=tgt_lang,
119
+ tokenizer_file=tokenizer_file,
120
+ eos_token=eos_token,
121
+ sep_token=sep_token,
122
+ cls_token=cls_token,
123
+ unk_token=unk_token,
124
+ pad_token=pad_token,
125
+ mask_token=mask_token,
126
+ **kwargs,
127
+ )
128
+
129
+ self.vocab_file = vocab_file
130
+
131
+ self.lang_code_to_id = {
132
+ lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
133
+ }
134
+
135
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
136
+ self.tgt_lang = tgt_lang
137
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
138
+ self.set_src_lang_special_tokens(self._src_lang)
139
+
140
+ @property
141
+ def can_save_slow_tokenizer(self) -> bool:
142
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
143
+
144
+ @property
145
+ def src_lang(self) -> str:
146
+ return self._src_lang
147
+
148
+ @src_lang.setter
149
+ def src_lang(self, new_src_lang: str) -> None:
150
+ self._src_lang = new_src_lang
151
+ self.set_src_lang_special_tokens(self._src_lang)
152
+
153
+ def build_inputs_with_special_tokens(
154
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
155
+ ) -> List[int]:
156
+ """
157
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
158
+ adding special tokens. The special tokens depend on calling set_lang.
159
+
160
+ An MBART-50 sequence has the following format, where `X` represents the sequence:
161
+
162
+ - `input_ids` (for encoder) `[src_lang_code] X [eos]`
163
+ - `labels`: (for decoder) `[tgt_lang_code] X [eos]`
164
+
165
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
166
+ separator.
167
+
168
+ Args:
169
+ token_ids_0 (`List[int]`):
170
+ List of IDs to which the special tokens will be added.
171
+ token_ids_1 (`List[int]`, *optional*):
172
+ Optional second list of IDs for sequence pairs.
173
+
174
+ Returns:
175
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
176
+ """
177
+ if token_ids_1 is None:
178
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
179
+ # We don't expect to process pairs, but leave the pair logic for API consistency
180
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
181
+
182
+ def prepare_seq2seq_batch(
183
+ self,
184
+ src_texts: List[str],
185
+ src_lang: str = "en_XX",
186
+ tgt_texts: Optional[List[str]] = None,
187
+ tgt_lang: str = "ro_RO",
188
+ **kwargs,
189
+ ) -> BatchEncoding:
190
+ self.src_lang = src_lang
191
+ self.tgt_lang = tgt_lang
192
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
193
+
194
+ def _switch_to_input_mode(self):
195
+ return self.set_src_lang_special_tokens(self.src_lang)
196
+
197
+ def _switch_to_target_mode(self):
198
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
199
+
200
+ def set_src_lang_special_tokens(self, src_lang: str) -> None:
201
+ """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
202
+ self.cur_lang_code_id = self.convert_tokens_to_ids(src_lang)
203
+ self.prefix_tokens = [self.cur_lang_code_id]
204
+ self.suffix_tokens = [self.eos_token_id]
205
+
206
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
207
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
208
+
209
+ self._tokenizer.post_processor = processors.TemplateProcessing(
210
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
211
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
212
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
213
+ )
214
+
215
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
216
+ """Reset the special tokens to the target language setting. prefix=[src_lang_code] and suffix=[eos]."""
217
+ self.cur_lang_code_id = self.convert_tokens_to_ids(tgt_lang)
218
+ self.prefix_tokens = [self.cur_lang_code_id]
219
+ self.suffix_tokens = [self.eos_token_id]
220
+
221
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
222
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
223
+
224
+ self._tokenizer.post_processor = processors.TemplateProcessing(
225
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
226
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
227
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
228
+ )
229
+
230
+ def _build_translation_inputs(
231
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
232
+ ):
233
+ """Used by translation pipeline, to prepare inputs for the generate function"""
234
+ if src_lang is None or tgt_lang is None:
235
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
236
+ self.src_lang = src_lang
237
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
238
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
239
+ inputs["forced_bos_token_id"] = tgt_lang_id
240
+ return inputs
241
+
242
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
243
+ if not self.can_save_slow_tokenizer:
244
+ raise ValueError(
245
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
246
+ "tokenizer."
247
+ )
248
+
249
+ if not os.path.isdir(save_directory):
250
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
251
+ return
252
+ out_vocab_file = os.path.join(
253
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
254
+ )
255
+
256
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
257
+ copyfile(self.vocab_file, out_vocab_file)
258
+
259
+ return (out_vocab_file,)
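`_build_translation_inputs` above hands `forced_bos_token_id` to the generation step so the decoder starts with the target language code, matching the `[tgt_lang_code] X [eos]` label format. A usage sketch of that flow; the `facebook/mbart-large-50-many-to-many-mmt` checkpoint and `MBartForConditionalGeneration` are assumptions beyond this file, taken from the usual mBART-50 translation setup:

```python
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

model_name = "facebook/mbart-large-50-many-to-many-mmt"  # assumed checkpoint
tokenizer = MBart50TokenizerFast.from_pretrained(model_name, src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(model_name)

# Encode with the en_XX prefix, then force the decoder to open with ro_RO,
# which is what _build_translation_inputs does for the translation pipeline.
inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("ro_RO"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```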
llmeval-env/lib/python3.10/site-packages/transformers/models/poolformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.28 kB).