applied-ai-018 committed
Commit 3a7ab8a · verified · 1 Parent(s): d35edd0

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt +3 -0
  7. venv/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py +63 -0
  8. venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py +245 -0
  10. venv/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py +0 -0
  11. venv/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py +42 -0
  12. venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py +314 -0
  15. venv/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py +51 -0
  16. venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py +257 -0
  20. venv/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py +1523 -0
  21. venv/lib/python3.10/site-packages/transformers/models/esm/__init__.py +94 -0
  22. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/convert_esm.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py +361 -0
  30. venv/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py +400 -0
  31. venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py +1265 -0
  32. venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py +2322 -0
  33. venv/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py +1567 -0
  34. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__init__.py +8 -0
  35. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/chunk_utils.py +397 -0
  45. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/data_transforms.py +93 -0
  46. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py +255 -0
  47. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/loss.py +105 -0
  48. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/protein.py +329 -0
  49. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/residue_constants.py +983 -0
  50. venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/rigid_utils.py +1242 -0
ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e89bf2e55e35ed0f05fbec26ef376f030a1b946484f40f2c9c0c827f184f1889
+ size 33555627
ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbc680c6b7aa23968e96f914f4e462357b4fb538d4a269593c227c67d4f4bd1c
+ size 33555627
ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:808d89a48271846ad48d88b3d8c563a03d2650c4ba42a020a2b2e9e35f2294d1
+ size 33555533
ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be0cdd8a27794a9b0901b27c872baccf9e5ff24b3ac7505f9b3235390c31e9ea
+ size 16778396
ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f05953525977728553a39a506e566c6611353f64cb94b6053cd02e152cef9964
+ size 16778411
ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c7ae9a86ef8973269e9560eb05c8eba5ea41f355259ba690fc2ac7c0efb0c2
+ size 16778317
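The six checkpoint entries above are Git LFS pointer stubs, not the tensors themselves: each is a three-line text file recording the spec version, the SHA-256 of the real blob, and its byte size. A minimal sketch of reading one such stub (the path is taken from the file list above; the parsing code is illustrative and not part of this commit):

```python
# Parse a Git LFS pointer file: `git lfs pull` (or the Hub's resolve endpoint)
# replaces this stub with the actual optimizer/weight tensor it describes.
from pathlib import Path

pointer = Path("ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt")
fields = dict(line.split(" ", 1) for line in pointer.read_text().splitlines())
print(fields["oid"])   # sha256:b6c7ae9a86ef... (matches the diff above)
print(fields["size"])  # 16778317 bytes
```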
venv/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py ADDED
@@ -0,0 +1,63 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ # rely on isort to merge the imports
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_autoformer": [
+         "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "AutoformerConfig",
+     ],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_autoformer"] = [
+         "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "AutoformerForPrediction",
+         "AutoformerModel",
+         "AutoformerPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_autoformer import (
+         AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         AutoformerConfig,
+     )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_autoformer import (
+             AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+             AutoformerForPrediction,
+             AutoformerModel,
+             AutoformerPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
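Because the package `__init__` above replaces itself with a `_LazyModule`, submodules are imported on first attribute access rather than at import time, and the torch-dependent modeling entries are only registered when `is_torch_available()` is true. A rough usage sketch, assuming the transformers build added in this commit is installed:

```python
# Importing AutoformerConfig resolves lazily through _import_structure above;
# no modeling code is loaded unless torch is present and the symbol is touched.
from transformers.models.autoformer import AutoformerConfig

config = AutoformerConfig(prediction_length=24)
print(config.model_type)  # "autoformer"
```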
venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (954 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py ADDED
@@ -0,0 +1,245 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Autoformer model configuration"""
16
+
17
+ from typing import List, Optional
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class AutoformerConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
32
+ Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
33
+ configuration with the defaults will yield a similar configuration to that of the Autoformer
34
+ [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
35
+ architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ prediction_length (`int`):
42
+ The prediction length for the decoder. In other words, the prediction horizon of the model.
43
+ context_length (`int`, *optional*, defaults to `prediction_length`):
44
+ The context length for the encoder. If unset, the context length will be the same as the
45
+ `prediction_length`.
46
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
47
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
48
+ loss (`string`, *optional*, defaults to `"nll"`):
49
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
50
+ distributions it is the negative log likelihood (nll) - which currently is the only supported one.
51
+ input_size (`int`, *optional*, defaults to 1):
52
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
53
+ multivariate targets.
54
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
55
+ The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
56
+ 5, 6, 7]`.
57
+ scaling (`bool`, *optional* defaults to `True`):
58
+ Whether to scale the input targets.
59
+ num_time_features (`int`, *optional*, defaults to 0):
60
+ The number of time features in the input time series.
61
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
62
+ The number of dynamic real valued features.
63
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
64
+ The number of static categorical features.
65
+ num_static_real_features (`int`, *optional*, defaults to 0):
66
+ The number of static real valued features.
67
+ cardinality (`list[int]`, *optional*):
68
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
69
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
70
+ `num_static_categorical_features` is > 0.
71
+ embedding_dimension (`list[int]`, *optional*):
72
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
73
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
74
+ `num_static_categorical_features` is > 0.
75
+ d_model (`int`, *optional*, defaults to 64):
76
+ Dimensionality of the transformer layers.
77
+ encoder_layers (`int`, *optional*, defaults to 2):
78
+ Number of encoder layers.
79
+ decoder_layers (`int`, *optional*, defaults to 2):
80
+ Number of decoder layers.
81
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
82
+ Number of attention heads for each attention layer in the Transformer encoder.
83
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
84
+ Number of attention heads for each attention layer in the Transformer decoder.
85
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
86
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
87
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
88
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
89
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
90
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
91
+ `"relu"` are supported.
92
+ dropout (`float`, *optional*, defaults to 0.1):
93
+ The dropout probability for all fully connected layers in the encoder, and decoder.
94
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
95
+ The dropout probability for the attention and fully connected layers for each encoder layer.
96
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
97
+ The dropout probability for the attention and fully connected layers for each decoder layer.
98
+ attention_dropout (`float`, *optional*, defaults to 0.1):
99
+ The dropout probability for the attention probabilities.
100
+ activation_dropout (`float`, *optional*, defaults to 0.1):
101
+ The dropout probability used between the two layers of the feed-forward networks.
102
+ num_parallel_samples (`int`, *optional*, defaults to 100):
103
+ The number of samples to generate in parallel for each time step of inference.
104
+ init_std (`float`, *optional*, defaults to 0.02):
105
+ The standard deviation of the truncated normal weight initialization distribution.
106
+ use_cache (`bool`, *optional*, defaults to `True`):
107
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
108
+ label_length (`int`, *optional*, defaults to 10):
109
+ Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
110
+ non-autoregressive generation).
111
+ moving_average (`int`, defaults to 25):
112
+ The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
113
+ Layer.
114
+ autocorrelation_factor (`int`, defaults to 3):
115
+ "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
116
+ It's recommended in the paper to set it to a number between 1 and 5.
117
+
118
+
119
+ Example:
120
+
121
+ ```python
122
+ >>> from transformers import AutoformerConfig, AutoformerModel
123
+
124
+ >>> # Initializing a default Autoformer configuration
125
+ >>> configuration = AutoformerConfig()
126
+
127
+ >>> # Randomly initializing a model (with random weights) from the configuration
128
+ >>> model = AutoformerModel(configuration)
129
+
130
+ >>> # Accessing the model configuration
131
+ >>> configuration = model.config
132
+ ```"""
133
+
134
+ model_type = "autoformer"
135
+ attribute_map = {
136
+ "hidden_size": "d_model",
137
+ "num_attention_heads": "encoder_attention_heads",
138
+ "num_hidden_layers": "encoder_layers",
139
+ }
140
+
141
+ def __init__(
142
+ self,
143
+ prediction_length: Optional[int] = None,
144
+ context_length: Optional[int] = None,
145
+ distribution_output: str = "student_t",
146
+ loss: str = "nll",
147
+ input_size: int = 1,
148
+ lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
149
+ scaling: bool = True,
150
+ num_time_features: int = 0,
151
+ num_dynamic_real_features: int = 0,
152
+ num_static_categorical_features: int = 0,
153
+ num_static_real_features: int = 0,
154
+ cardinality: Optional[List[int]] = None,
155
+ embedding_dimension: Optional[List[int]] = None,
156
+ d_model: int = 64,
157
+ encoder_attention_heads: int = 2,
158
+ decoder_attention_heads: int = 2,
159
+ encoder_layers: int = 2,
160
+ decoder_layers: int = 2,
161
+ encoder_ffn_dim: int = 32,
162
+ decoder_ffn_dim: int = 32,
163
+ activation_function: str = "gelu",
164
+ dropout: float = 0.1,
165
+ encoder_layerdrop: float = 0.1,
166
+ decoder_layerdrop: float = 0.1,
167
+ attention_dropout: float = 0.1,
168
+ activation_dropout: float = 0.1,
169
+ num_parallel_samples: int = 100,
170
+ init_std: float = 0.02,
171
+ use_cache: bool = True,
172
+ is_encoder_decoder=True,
173
+ # Autoformer arguments
174
+ label_length: int = 10,
175
+ moving_average: int = 25,
176
+ autocorrelation_factor: int = 3,
177
+ **kwargs,
178
+ ):
179
+ # time series specific configuration
180
+ self.prediction_length = prediction_length
181
+ self.context_length = context_length if context_length is not None else prediction_length
182
+ self.distribution_output = distribution_output
183
+ self.loss = loss
184
+ self.input_size = input_size
185
+ self.num_time_features = num_time_features
186
+ self.lags_sequence = lags_sequence
187
+ self.scaling = scaling
188
+ self.num_dynamic_real_features = num_dynamic_real_features
189
+ self.num_static_real_features = num_static_real_features
190
+ self.num_static_categorical_features = num_static_categorical_features
191
+ if cardinality is not None and num_static_categorical_features > 0:
192
+ if len(cardinality) != num_static_categorical_features:
193
+ raise ValueError(
194
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
195
+ )
196
+ self.cardinality = cardinality
197
+ else:
198
+ self.cardinality = [0]
199
+ if embedding_dimension is not None and num_static_categorical_features > 0:
200
+ if len(embedding_dimension) != num_static_categorical_features:
201
+ raise ValueError(
202
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
203
+ )
204
+ self.embedding_dimension = embedding_dimension
205
+ else:
206
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
207
+ self.num_parallel_samples = num_parallel_samples
208
+
209
+ # Transformer architecture configuration
210
+ self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
211
+ self.d_model = d_model
212
+ self.encoder_attention_heads = encoder_attention_heads
213
+ self.decoder_attention_heads = decoder_attention_heads
214
+ self.encoder_ffn_dim = encoder_ffn_dim
215
+ self.decoder_ffn_dim = decoder_ffn_dim
216
+ self.encoder_layers = encoder_layers
217
+ self.decoder_layers = decoder_layers
218
+
219
+ self.dropout = dropout
220
+ self.attention_dropout = attention_dropout
221
+ self.activation_dropout = activation_dropout
222
+ self.encoder_layerdrop = encoder_layerdrop
223
+ self.decoder_layerdrop = decoder_layerdrop
224
+
225
+ self.activation_function = activation_function
226
+ self.init_std = init_std
227
+
228
+ self.use_cache = use_cache
229
+
230
+ # Autoformer
231
+ self.label_length = label_length
232
+ self.moving_average = moving_average
233
+ self.autocorrelation_factor = autocorrelation_factor
234
+
235
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
236
+
237
+ @property
238
+ def _number_of_features(self) -> int:
239
+ return (
240
+ sum(self.embedding_dimension)
241
+ + self.num_dynamic_real_features
242
+ + self.num_time_features
243
+ + self.num_static_real_features
244
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
245
+ )
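Once the constructor above has run, `feature_size` is fully determined by the time-series arguments. A worked sketch with illustrative settings (these values are not tied to any released checkpoint):

```python
# How feature_size is derived by AutoformerConfig.__init__ shown above.
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,
    input_size=1,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
)
# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (sum(embedding_dimension) + num_dynamic_real_features
#                         + num_time_features + num_static_real_features + input_size * 2)
#              = 7 + (0 + 0 + 2 + 0 + 2) = 11
print(config.feature_size)    # 11
print(config.context_length)  # 24 -- falls back to prediction_length when unset
```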
venv/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py ADDED
@@ -0,0 +1,42 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
+
+
+ _import_structure = {}
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
+
+ if TYPE_CHECKING:
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_bartpho import BartphoTokenizer
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (696 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
venv/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py ADDED
@@ -0,0 +1,314 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 VinAI Research and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for BARTpho-syllable model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ SPIECE_UNDERLINE = "▁"
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
33
+
34
+
35
+ class BartphoTokenizer(PreTrainedTokenizer):
36
+ """
37
+ Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
38
+
39
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
40
+ this superclass for more information regarding those methods.
41
+
42
+ Args:
43
+ vocab_file (`str`):
44
+ Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the
45
+ multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.
46
+ monolingual_vocab_file (`str`):
47
+ Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized
48
+ types extracted from the multilingual vocabulary vocab_file of 250K types.
49
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
50
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
51
+
52
+ <Tip>
53
+
54
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
55
+ sequence. The token used is the `cls_token`.
56
+
57
+ </Tip>
58
+
59
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
60
+ The end of sequence token.
61
+
62
+ <Tip>
63
+
64
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
65
+ The token used is the `sep_token`.
66
+
67
+ </Tip>
68
+
69
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
70
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
71
+ sequence classification or for a text and a question for question answering. It is also used as the last
72
+ token of a sequence built with special tokens.
73
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
74
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
75
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
76
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
77
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
78
+ token instead.
79
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
80
+ The token used for padding, for example when batching sequences of different lengths.
81
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
82
+ The token used for masking values. This is the token used when training this model with masked language
83
+ modeling. This is the token which the model will try to predict.
84
+ sp_model_kwargs (`dict`, *optional*):
85
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
86
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
87
+ to set:
88
+
89
+ - `enable_sampling`: Enable subword regularization.
90
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
91
+
92
+ - `nbest_size = {0,1}`: No sampling is performed.
93
+ - `nbest_size > 1`: samples from the nbest_size results.
94
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
95
+ using forward-filtering-and-backward-sampling algorithm.
96
+
97
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
98
+ BPE-dropout.
99
+
100
+ Attributes:
101
+ sp_model (`SentencePieceProcessor`):
102
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
103
+ """
104
+
105
+ vocab_files_names = VOCAB_FILES_NAMES
106
+ model_input_names = ["input_ids", "attention_mask"]
107
+
108
+ def __init__(
109
+ self,
110
+ vocab_file,
111
+ monolingual_vocab_file,
112
+ bos_token="<s>",
113
+ eos_token="</s>",
114
+ sep_token="</s>",
115
+ cls_token="<s>",
116
+ unk_token="<unk>",
117
+ pad_token="<pad>",
118
+ mask_token="<mask>",
119
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
120
+ **kwargs,
121
+ ) -> None:
122
+ # Mask token behave like a normal word, i.e. include the space before it
123
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
124
+
125
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
126
+
127
+ self.vocab_file = vocab_file
128
+ self.monolingual_vocab_file = monolingual_vocab_file
129
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
130
+ self.sp_model.Load(str(vocab_file))
131
+
132
+ # Load the reduced vocab
133
+
134
+ # Keep order of special tokens for backward compatibility
135
+ self.fairseq_tokens_to_ids = {}
136
+ cnt = 0
137
+ for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
138
+ if str(token) not in self.fairseq_tokens_to_ids:
139
+ self.fairseq_tokens_to_ids[str(token)] = cnt
140
+ cnt += 1
141
+ with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
142
+ for line in f.readlines():
143
+ token = line.strip().split()[0]
144
+ self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
145
+ if str(mask_token) not in self.fairseq_tokens_to_ids:
146
+ self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
147
+
148
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
149
+
150
+ super().__init__(
151
+ bos_token=bos_token,
152
+ eos_token=eos_token,
153
+ unk_token=unk_token,
154
+ sep_token=sep_token,
155
+ cls_token=cls_token,
156
+ pad_token=pad_token,
157
+ mask_token=mask_token,
158
+ sp_model_kwargs=self.sp_model_kwargs,
159
+ **kwargs,
160
+ )
161
+
162
+ def __getstate__(self):
163
+ state = self.__dict__.copy()
164
+ state["sp_model"] = None
165
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
166
+ return state
167
+
168
+ def __setstate__(self, d):
169
+ self.__dict__ = d
170
+
171
+ # for backward compatibility
172
+ if not hasattr(self, "sp_model_kwargs"):
173
+ self.sp_model_kwargs = {}
174
+
175
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
176
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
177
+
178
+ def build_inputs_with_special_tokens(
179
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
180
+ ) -> List[int]:
181
+ """
182
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
183
+ adding special tokens. An BARTPho sequence has the following format:
184
+
185
+ - single sequence: `<s> X </s>`
186
+ - pair of sequences: `<s> A </s></s> B </s>`
187
+
188
+ Args:
189
+ token_ids_0 (`List[int]`):
190
+ List of IDs to which the special tokens will be added.
191
+ token_ids_1 (`List[int]`, *optional*):
192
+ Optional second list of IDs for sequence pairs.
193
+
194
+ Returns:
195
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
196
+ """
197
+
198
+ if token_ids_1 is None:
199
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
200
+ cls = [self.cls_token_id]
201
+ sep = [self.sep_token_id]
202
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
203
+
204
+ def get_special_tokens_mask(
205
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
206
+ ) -> List[int]:
207
+ """
208
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
209
+ special tokens using the tokenizer `prepare_for_model` method.
210
+
211
+ Args:
212
+ token_ids_0 (`List[int]`):
213
+ List of IDs.
214
+ token_ids_1 (`List[int]`, *optional*):
215
+ Optional second list of IDs for sequence pairs.
216
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
217
+ Whether or not the token list is already formatted with special tokens for the model.
218
+
219
+ Returns:
220
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
221
+ """
222
+
223
+ if already_has_special_tokens:
224
+ return super().get_special_tokens_mask(
225
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
226
+ )
227
+
228
+ if token_ids_1 is None:
229
+ return [1] + ([0] * len(token_ids_0)) + [1]
230
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
231
+
232
+ def create_token_type_ids_from_sequences(
233
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
234
+ ) -> List[int]:
235
+ """
236
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not
237
+ make use of token type ids, therefore a list of zeros is returned.
238
+
239
+ Args:
240
+ token_ids_0 (`List[int]`):
241
+ List of IDs.
242
+ token_ids_1 (`List[int]`, *optional*):
243
+ Optional second list of IDs for sequence pairs.
244
+
245
+ Returns:
246
+ `List[int]`: List of zeros.
247
+
248
+ """
249
+
250
+ sep = [self.sep_token_id]
251
+ cls = [self.cls_token_id]
252
+
253
+ if token_ids_1 is None:
254
+ return len(cls + token_ids_0 + sep) * [0]
255
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
256
+
257
+ @property
258
+ def vocab_size(self):
259
+ return len(self.fairseq_ids_to_tokens)
260
+
261
+ def get_vocab(self):
262
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
263
+ vocab.update(self.added_tokens_encoder)
264
+ return vocab
265
+
266
+ def _tokenize(self, text: str) -> List[str]:
267
+ return self.sp_model.encode(text, out_type=str)
268
+
269
+ def _convert_token_to_id(self, token):
270
+ """Converts a token (str) in an id using the vocab."""
271
+ if token in self.fairseq_tokens_to_ids:
272
+ return self.fairseq_tokens_to_ids[token]
273
+ else:
274
+ return self.unk_token_id
275
+
276
+ def _convert_id_to_token(self, index):
277
+ """Converts an index (integer) in a token (str) using the vocab."""
278
+ return self.fairseq_ids_to_tokens[index]
279
+
280
+ def convert_tokens_to_string(self, tokens):
281
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
282
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
283
+ return out_string
284
+
285
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
286
+ if not os.path.isdir(save_directory):
287
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
288
+ return
289
+ out_vocab_file = os.path.join(
290
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
291
+ )
292
+ out_monolingual_vocab_file = os.path.join(
293
+ save_directory,
294
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
295
+ )
296
+
297
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
298
+ copyfile(self.vocab_file, out_vocab_file)
299
+ elif not os.path.isfile(self.vocab_file):
300
+ with open(out_vocab_file, "wb") as fi:
301
+ content_spiece_model = self.sp_model.serialized_model_proto()
302
+ fi.write(content_spiece_model)
303
+
304
+ if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
305
+ out_monolingual_vocab_file
306
+ ) and os.path.isfile(self.monolingual_vocab_file):
307
+ copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
308
+ elif not os.path.isfile(self.monolingual_vocab_file):
309
+ with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
310
+ for token in self.fairseq_tokens_to_ids:
311
+ if token not in self.all_special_tokens:
312
+ fp.write(f"{str(token)} \n")
313
+
314
+ return out_vocab_file, out_monolingual_vocab_file
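The special-token layout produced by `build_inputs_with_special_tokens` above can be reproduced without loading any SentencePiece model. A minimal sketch (the subword IDs are placeholders; 0 and 2 are the positions that the fairseq-style vocabulary built in `__init__` assigns to `<s>` and `</s>`):

```python
# Mirrors build_inputs_with_special_tokens above:
#   single sequence: <s> A </s>
#   pair:            <s> A </s></s> B </s>
cls_id, sep_id = 0, 2           # <s> and </s> in the fairseq-ordered vocab
token_ids_0 = [101, 102, 103]   # hypothetical subword IDs for sequence A
token_ids_1 = [201, 202]        # hypothetical subword IDs for sequence B

single = [cls_id] + token_ids_0 + [sep_id]
pair = [cls_id] + token_ids_0 + [sep_id, sep_id] + token_ids_1 + [sep_id]
print(single)  # [0, 101, 102, 103, 2]
print(pair)    # [0, 101, 102, 103, 2, 2, 201, 202, 2]
```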
venv/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py ADDED
@@ -0,0 +1,51 @@
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_dbrx": ["DbrxConfig"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_dbrx"] = [
+         "DbrxForCausalLM",
+         "DbrxModel",
+         "DbrxPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_dbrx import DbrxConfig
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (795 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc ADDED
Binary file (9.37 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc ADDED
Binary file (44.4 kB).
 
venv/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py ADDED
@@ -0,0 +1,257 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DBRX model configuration """
16
+
17
+ from typing import Any, Optional
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class DbrxAttentionConfig(PretrainedConfig):
27
+ """Configuration class for Dbrx Attention.
28
+
29
+ [`DbrxAttention`] class. It is used to instantiate attention layers
30
+ according to the specified arguments, defining the layers architecture.
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+ Args:
36
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
37
+ The dropout probability for the attention layers.
38
+ clip_qkv (`float`, *optional*):
39
+ If set, clip the queries, keys, and values in the attention layer to this value.
40
+ kv_n_heads (`Optional[int]`, defaults to 1): For grouped_query_attention only, allow user to specify number of kv heads.
41
+ rope_theta (`float`, defaults to 10000.0): The base frequency for rope.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ attn_pdrop: float = 0.0,
47
+ clip_qkv: Optional[float] = None,
48
+ kv_n_heads: int = 1,
49
+ rope_theta: float = 10000.0,
50
+ **kwargs: Any,
51
+ ):
52
+ super().__init__(**kwargs)
53
+ self.attn_pdrop = attn_pdrop
54
+ self.clip_qkv = clip_qkv
55
+ self.kv_n_heads = kv_n_heads
56
+ self.rope_theta = rope_theta
57
+
58
+ for k in ["model_type"]:
59
+ if k in kwargs:
60
+ kwargs.pop(k)
61
+ if len(kwargs) != 0:
62
+ raise ValueError(f"Found unknown {kwargs=}")
63
+
64
+ @classmethod
65
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
66
+ cls._set_token_in_kwargs(kwargs)
67
+
68
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
69
+
70
+ if config_dict.get("model_type") == "dbrx":
71
+ config_dict = config_dict["attn_config"]
72
+
73
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
74
+ logger.warning(
75
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
76
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
77
+ )
78
+
79
+ return cls.from_dict(config_dict, **kwargs)
80
+
81
+
82
+ class DbrxFFNConfig(PretrainedConfig):
83
+ """Configuration class for Dbrx FFN.
84
+
85
+ [`DbrxFFN`] class. It is used to instantiate feedforward layers according to
86
+ the specified arguments, defining the layers architecture.
87
+
88
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
89
+ documentation from [`PretrainedConfig`] for more information.
90
+
91
+ Args:
92
+ ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
93
+ The dict should have a key 'name' with the value being the name of the activation function along with
94
+ any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
95
+ ffn_hidden_size (`int`, defaults to 3584): The hidden size of the feedforward network.
96
+ moe_num_experts (`int`, defaults to 4): The number of experts in the mixture of experts layer.
97
+ moe_top_k (`int`, defaults to 1): The number of experts to use in the mixture of experts layer.
98
+ moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
99
+ moe_loss_weight (`float`, defaults to 0.01): The loss weight for the mixture of experts layer.
100
+ moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
101
+ """
102
+
103
+ def __init__(
104
+ self,
105
+ ffn_act_fn: dict = None,
106
+ ffn_hidden_size: int = 3584,
107
+ moe_num_experts: int = 4,
108
+ moe_top_k: int = 1,
109
+ moe_jitter_eps: Optional[float] = None,
110
+ moe_loss_weight: float = 0.01,
111
+ moe_normalize_expert_weights: Optional[float] = 1.0,
112
+ **kwargs: Any,
113
+ ):
114
+ super().__init__()
115
+ if ffn_act_fn is None:
116
+ ffn_act_fn = {"name": "silu"}
117
+ self.ffn_act_fn = ffn_act_fn
118
+ self.ffn_hidden_size = ffn_hidden_size
119
+ self.moe_num_experts = moe_num_experts
120
+ self.moe_top_k = moe_top_k
121
+ self.moe_jitter_eps = moe_jitter_eps
122
+ self.moe_loss_weight = moe_loss_weight
123
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
124
+
125
+ for k in ["model_type"]:
126
+ if k in kwargs:
127
+ kwargs.pop(k)
128
+ if len(kwargs) != 0:
129
+ raise ValueError(f"Found unknown {kwargs=}")
130
+
131
+ @classmethod
132
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
133
+ cls._set_token_in_kwargs(kwargs)
134
+
135
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
136
+
137
+ if config_dict.get("model_type") == "dbrx":
138
+ config_dict = config_dict["ffn_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class DbrxConfig(PretrainedConfig):
150
+ r"""
151
+
152
+ This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
153
+ specified arguments, defining the model architecture. Instantiating a configuration with the
154
+ defaults will yield a different configuration to that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.
155
+
156
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
157
+ documentation from [`PretrainedConfig`] for more information.
158
+
159
+
160
+ Args:
161
+ d_model (`int`, *optional*, defaults to 2048):
162
+ Dimensionality of the embeddings and hidden states.
163
+ n_heads (`int`, *optional*, defaults to 16):
164
+ Number of attention heads for each attention layer in the Transformer encoder.
165
+ n_layers (`int`, *optional*, defaults to 24):
166
+ Number of hidden layers in the Transformer encoder.
167
+ max_seq_len (`int`, *optional*, defaults to 2048):
168
+ The maximum sequence length of the model.
169
+ vocab_size (`int`, *optional*, defaults to 32000):
170
+ Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
171
+ the `inputs_ids` passed when calling [`DbrxModel`].
172
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
173
+ The dropout probability applied to the attention output before combining with residual.
174
+ emb_pdrop (`float`, *optional*, defaults to 0.0):
175
+ The dropout probability for the embedding layer.
176
+ attn_config (`dict`, *optional*):
177
+ A dictionary used to configure the model's attention module.
178
+ ffn_config (`dict`, *optional*):
179
+ A dictionary used to configure the model's FFN module.
180
+ use_cache (`bool`, *optional*, defaults to `True`):
181
+ Whether or not the model should return the last key/values attentions (not used by all models).
182
+ initializer_range (`float`, *optional*, defaults to 0.02):
183
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
184
+ output_router_logits (`bool`, *optional*, defaults to `False`):
185
+ Whether or not the router logits should be returned by the model. Enabling this will also
186
+ allow the model to output the auxiliary loss. See [here]() for more details.
187
+
188
+
189
+ Example:
190
+ ```python
191
+ >>> from transformers import DbrxConfig, DbrxModel
192
+
193
+ >>> # Initializing a Dbrx configuration
194
+ >>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)
195
+
196
+ >>> # Initializing a model (with random weights) from the configuration
197
+ >>> model = DbrxModel(configuration)
198
+
199
+ >>> # Accessing the model configuration
200
+ >>> configuration = model.config
201
+ ```
202
+ """
203
+
204
+ model_type = "dbrx"
205
+ attribute_map = {
206
+ "num_attention_heads": "n_heads",
207
+ "hidden_size": "d_model",
208
+ "num_hidden_layers": "n_layers",
209
+ "max_position_embeddings": "max_seq_len",
210
+ }
211
+
212
+ def __init__(
213
+ self,
214
+ d_model: int = 2048,
215
+ n_heads: int = 16,
216
+ n_layers: int = 24,
217
+ max_seq_len: int = 2048,
218
+ vocab_size: int = 32000,
219
+ resid_pdrop: float = 0.0,
220
+ emb_pdrop: float = 0.0,
221
+ attn_config: Optional[DbrxAttentionConfig] = None,
222
+ ffn_config: Optional[DbrxFFNConfig] = None,
223
+ use_cache: bool = True,
224
+ initializer_range: float = 0.02,
225
+ output_router_logits: bool = False,
226
+ **kwargs: Any,
227
+ ):
228
+ if attn_config is None:
229
+ self.attn_config = DbrxAttentionConfig()
230
+ elif isinstance(attn_config, dict):
231
+ self.attn_config = DbrxAttentionConfig(**attn_config)
232
+ else:
233
+ self.attn_config = attn_config
234
+
235
+ if ffn_config is None:
236
+ self.ffn_config = DbrxFFNConfig()
237
+ elif isinstance(ffn_config, dict):
238
+ self.ffn_config = DbrxFFNConfig(**ffn_config)
239
+ else:
240
+ self.ffn_config = ffn_config
241
+
242
+ self.d_model = d_model
243
+ self.n_heads = n_heads
244
+ self.n_layers = n_layers
245
+ self.max_seq_len = max_seq_len
246
+ self.vocab_size = vocab_size
247
+ self.resid_pdrop = resid_pdrop
248
+ self.emb_pdrop = emb_pdrop
249
+ self.use_cache = use_cache
250
+ self.initializer_range = initializer_range
251
+ self.output_router_logits = output_router_logits
252
+
253
+ tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
254
+ if tie_word_embeddings:
255
+ raise ValueError("tie_word_embeddings is not supported for DBRX models.")
256
+
257
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
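As the constructor above shows, `attn_config` and `ffn_config` accept plain dicts and are promoted to the dedicated sub-config classes. A small sketch with deliberately tiny sizes (illustrative values, not the released dbrx-instruct configuration):

```python
# Nested sub-config resolution in DbrxConfig.__init__ shown above.
from transformers import DbrxConfig

config = DbrxConfig(
    d_model=256,
    n_heads=8,
    n_layers=2,
    vocab_size=128,
    attn_config={"kv_n_heads": 2, "rope_theta": 10000.0},
    ffn_config={"ffn_hidden_size": 512, "moe_num_experts": 4, "moe_top_k": 1},
)
# Dicts are converted to DbrxAttentionConfig / DbrxFFNConfig instances.
print(type(config.attn_config).__name__)  # DbrxAttentionConfig
print(config.ffn_config.moe_num_experts)  # 4
print(config.max_position_embeddings)     # 2048, mapped from max_seq_len
```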
venv/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py ADDED
@@ -0,0 +1,1523 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch DBRX model. """
16
+
17
+ import math
18
+ from typing import Any, Dict, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn.functional as F
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...cache_utils import Cache, DynamicCache, StaticCache
27
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
28
+ from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ add_start_docstrings,
32
+ add_start_docstrings_to_model_forward,
33
+ is_flash_attn_2_available,
34
+ is_flash_attn_greater_or_equal_2_10,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_dbrx import DbrxConfig
39
+
40
+
41
+ if is_flash_attn_2_available():
42
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
43
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = "DbrxConfig"
48
+
49
+
50
+ # Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with Gemma->Dbrx
51
+ class DbrxRotaryEmbedding(nn.Module):
52
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
53
+ super().__init__()
54
+
55
+ self.dim = dim
56
+ self.max_position_embeddings = max_position_embeddings
57
+ self.base = base
58
+ self.register_buffer("inv_freq", None, persistent=False)
59
+
60
+ @torch.no_grad()
61
+ def forward(self, x, position_ids, seq_len=None):
62
+ # x: [bs, num_attention_heads, seq_len, head_size]
63
+ if self.inv_freq is None:
64
+ self.inv_freq = 1.0 / (
65
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
66
+ )
67
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
68
+ position_ids_expanded = position_ids[:, None, :].float()
69
+ # Force float32 since bfloat16 loses precision on long contexts
70
+ # See https://github.com/huggingface/transformers/pull/29285
71
+ device_type = x.device.type
72
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
73
+ with torch.autocast(device_type=device_type, enabled=False):
74
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
75
+ emb = torch.cat((freqs, freqs), dim=-1)
76
+ cos = emb.cos()
77
+ sin = emb.sin()
78
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
79
+
80
+
81
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
82
+ def rotate_half(x):
83
+ """Rotates half the hidden dims of the input."""
84
+ x1 = x[..., : x.shape[-1] // 2]
85
+ x2 = x[..., x.shape[-1] // 2 :]
86
+ return torch.cat((-x2, x1), dim=-1)
87
+
88
+
89
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
90
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
91
+ """Applies Rotary Position Embedding to the query and key tensors.
92
+
93
+ Args:
94
+ q (`torch.Tensor`): The query tensor.
95
+ k (`torch.Tensor`): The key tensor.
96
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
97
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
98
+ position_ids (`torch.Tensor`, *optional*):
99
+ Deprecated and unused.
100
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
101
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
102
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
103
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
104
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
105
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
106
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
107
+ Returns:
108
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
109
+ """
110
+ cos = cos.unsqueeze(unsqueeze_dim)
111
+ sin = sin.unsqueeze(unsqueeze_dim)
112
+ q_embed = (q * cos) + (rotate_half(q) * sin)
113
+ k_embed = (k * cos) + (rotate_half(k) * sin)
114
+ return q_embed, k_embed
115
+
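+ # Editor's note: a minimal shape sketch for `apply_rotary_pos_emb` (not part of the original
+ # DBRX sources; the sizes below are made up for illustration). With the default
+ # `unsqueeze_dim=1`, cos/sin of shape (batch, seq, head_dim) broadcast over the head dimension
+ # of q (batch, heads, seq, head_dim) and k (batch, kv_heads, seq, head_dim):
+ #
+ #     rope = DbrxRotaryEmbedding(dim=64)
+ #     q = torch.randn(1, 8, 16, 64)
+ #     k = torch.randn(1, 2, 16, 64)
+ #     cos, sin = rope(q, torch.arange(16)[None, :])          # each: (1, 16, 64)
+ #     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)     # shapes unchanged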
116
+
117
+ # Copied from transformers.models.llama.modeling_llama.repeat_kv
118
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
119
+ """
120
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
121
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
122
+ """
123
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
124
+ if n_rep == 1:
125
+ return hidden_states
126
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
127
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
128
+
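+ # Editor's note: illustrative check (not part of the original sources) that `repeat_kv` matches
+ # `torch.repeat_interleave` along the key/value head dimension:
+ #
+ #     kv = torch.randn(2, 4, 16, 64)                          # (batch, kv_heads, seq, head_dim)
+ #     assert torch.equal(repeat_kv(kv, 3), torch.repeat_interleave(kv, repeats=3, dim=1))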
129
+
130
+ def load_balancing_loss_func(
131
+ gate_logits: torch.Tensor,
132
+ num_experts: int,
133
+ top_k: int,
134
+ attention_mask: Optional[torch.Tensor],
135
+ ) -> torch.Tensor:
136
+ r"""Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
137
+
138
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
139
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
140
+ experts is too unbalanced.
141
+
142
+ Args:
143
+ gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
144
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
145
+ shape [batch_size X sequence_length, num_experts].
146
+ num_experts (`int`):
147
+ Number of experts.
148
+ top_k (`int`):
149
+ The number of experts each token is routed to.
150
+ attention_mask (`torch.Tensor`, None):
151
+ The attention_mask used in the forward function, of
152
+ shape [batch_size X sequence_length] if not None.
153
+
154
+ Returns:
155
+ The auxiliary loss.
156
+ """
157
+ if gate_logits is None or not isinstance(gate_logits, tuple):
158
+ return torch.tensor(0.0)
159
+
160
+ if isinstance(gate_logits, tuple):
161
+ compute_device = gate_logits[0].device
162
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
163
+
164
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
165
+
166
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
167
+
168
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
169
+
170
+ if attention_mask is None:
171
+ # Compute the percentage of tokens routed to each expert
172
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
173
+
174
+ # Compute the average probability of routing to these experts
175
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
176
+ else:
177
+ batch_size, sequence_length = attention_mask.shape
178
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
179
+
180
+ # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
181
+ expert_attention_mask = (
182
+ attention_mask[None, :, :, None, None]
183
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
184
+ .reshape(-1, top_k, num_experts)
185
+ .to(compute_device)
186
+ )
187
+
188
+ # Compute the percentage of tokens routed to each expert
189
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
190
+ expert_attention_mask, dim=0
191
+ )
192
+
193
+ # Compute the mask that masks all padding tokens as 0 with the same shape as tokens_per_expert
194
+ router_per_expert_attention_mask = (
195
+ attention_mask[None, :, :, None]
196
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
197
+ .reshape(-1, num_experts)
198
+ .to(compute_device)
199
+ )
200
+
201
+ # Compute the average probability of routing to these experts
202
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
203
+ router_per_expert_attention_mask, dim=0
204
+ )
205
+
206
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
207
+ return overall_loss * num_experts
208
+
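+ # Editor's note: a small, hypothetical sanity check for `load_balancing_loss_func` (not part of
+ # the original sources). With all-zero gate logits the router probabilities are uniform
+ # (1/num_experts), so however the top-k ties are broken the loss reduces to
+ # num_experts * top_k * (1/num_experts) = top_k:
+ #
+ #     gate_logits = tuple(torch.zeros(8, 4) for _ in range(2))   # 2 layers, 8 tokens, 4 experts
+ #     aux = load_balancing_loss_func(gate_logits, num_experts=4, top_k=2, attention_mask=None)
+ #     assert torch.isclose(aux, torch.tensor(2.0))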
209
+
210
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
211
+ def _get_unpad_data(attention_mask):
212
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
213
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
214
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
215
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
216
+ return (
217
+ indices,
218
+ cu_seqlens,
219
+ max_seqlen_in_batch,
220
+ )
221
+
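+ # Editor's note: illustrative example of `_get_unpad_data` (not part of the original sources)
+ # for a right-padded batch; it returns the flat indices of the real tokens, the cumulative
+ # sequence lengths expected by the varlen flash-attention kernels, and the longest sequence:
+ #
+ #     mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
+ #     indices, cu_seqlens, max_len = _get_unpad_data(mask)
+ #     # indices -> tensor([0, 1, 2, 4, 5]); cu_seqlens -> tensor([0, 3, 5], dtype=torch.int32); max_len -> 3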
222
+
223
+ class DbrxAttention(nn.Module):
224
+ """Multi-head self attention."""
225
+
226
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
227
+ super().__init__()
228
+ self.config = config
229
+ self.hidden_size = config.d_model
230
+ self.num_heads = config.n_heads
231
+ self.head_dim = self.hidden_size // self.num_heads
232
+ self.max_position_embeddings = config.max_seq_len
233
+ self.block_idx = block_idx
234
+ if block_idx is None:
235
+ logger.warning_once(
236
+ f"Instantiating {self.__class__.__name__} without passing a `block_idx` is not recommended and will "
237
+ + "lead to errors during the forward call if caching is used. Please make sure to provide a `block_idx` "
238
+ + "when creating this class."
239
+ )
240
+
241
+ attn_config = config.attn_config
242
+ self.attn_pdrop = attn_config.attn_pdrop
243
+ self.clip_qkv = attn_config.clip_qkv
244
+ self.num_key_value_heads = attn_config.kv_n_heads
245
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
246
+ self.rope_theta = attn_config.rope_theta
247
+ self.is_causal = True
248
+
249
+ self.Wqkv = nn.Linear(
250
+ self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False
251
+ )
252
+ self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
253
+ self.rotary_emb = DbrxRotaryEmbedding(
254
+ self.head_dim,
255
+ max_position_embeddings=self.max_position_embeddings,
256
+ base=self.rope_theta,
257
+ )
258
+
259
+ def forward(
260
+ self,
261
+ hidden_states: torch.Tensor,
262
+ position_ids: torch.LongTensor,
263
+ attention_mask: Optional[torch.Tensor] = None,
264
+ past_key_value: Optional[Cache] = None,
265
+ output_attentions: bool = False,
266
+ use_cache: bool = False,
267
+ cache_position: Optional[torch.LongTensor] = None,
268
+ **kwargs: Any,
269
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
270
+ bsz, q_len, _ = hidden_states.size()
271
+
272
+ qkv_states = self.Wqkv(hidden_states)
273
+ min_val = -self.clip_qkv if self.clip_qkv is not None else None
274
+ max_val = self.clip_qkv
275
+ qkv_states = qkv_states.clamp(min=min_val, max=max_val)
276
+
277
+ query_states, key_states, value_states = qkv_states.split(
278
+ [
279
+ self.hidden_size,
280
+ self.num_key_value_heads * self.head_dim,
281
+ self.num_key_value_heads * self.head_dim,
282
+ ],
283
+ dim=2,
284
+ )
285
+
286
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
287
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
288
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
289
+
290
+ past_key_value = getattr(self, "past_key_value", past_key_value)
291
+ cos, sin = self.rotary_emb(value_states, position_ids)
292
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
293
+
294
+ if past_key_value is not None:
295
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
296
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
297
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
298
+
299
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
300
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
301
+
302
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
303
+
304
+ if attention_mask is not None: # no matter the length, we just slice it
305
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
306
+ attn_weights = attn_weights + causal_mask
307
+
308
+ # upcast attention to fp32
309
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
310
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attn_pdrop, training=self.training)
311
+ attn_output = torch.matmul(attn_weights, value_states)
312
+
313
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
314
+ raise ValueError(
315
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
316
+ + f" {attn_output.size()}"
317
+ )
318
+
319
+ attn_output = attn_output.transpose(1, 2).contiguous()
320
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
321
+ attn_output = self.out_proj(attn_output)
322
+
323
+ if not output_attentions:
324
+ attn_weights = None
325
+
326
+ return attn_output, attn_weights, past_key_value
327
+
328
+
329
+ class DbrxFlashAttention2(DbrxAttention):
330
+ """Dbrx flash attention module.
331
+
332
+ This module inherits from `DbrxAttention` as the weights of the module stay
333
+ untouched. The only required change would be on the forward pass where it
334
+ calls the public API of flash attention.
335
+ """
336
+
337
+ def __init__(self, *args: Any, **kwargs: Any):
338
+ super().__init__(*args, **kwargs)
339
+
340
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
341
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
342
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
343
+ # From: https://github.com/huggingface/transformers/blob/3b8e2932ce743008f63585aae1e1b8b30dc8b3ac/src/transformers/models/gemma/modeling_gemma.py#L318
344
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
345
+
346
+ def forward(
347
+ self,
348
+ hidden_states: torch.Tensor,
349
+ attention_mask: Optional[torch.LongTensor] = None,
350
+ position_ids: Optional[torch.LongTensor] = None,
351
+ past_key_value: Optional[Cache] = None,
352
+ output_attentions: bool = False,
353
+ use_cache: bool = False,
354
+ cache_position: Optional[torch.LongTensor] = None,
355
+ **kwargs: Any,
356
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
357
+ logger.info("Implicitly setting `output_attentions` to False as it is not supported in Flash Attention.")
358
+ output_attentions = False
359
+
360
+ bsz, q_len, _ = hidden_states.size()
361
+
362
+ qkv_states = self.Wqkv(hidden_states)
363
+ if self.clip_qkv is not None:
364
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
365
+
366
+ query_states, key_states, value_states = qkv_states.split(
367
+ [
368
+ self.hidden_size,
369
+ self.num_key_value_heads * self.head_dim,
370
+ self.num_key_value_heads * self.head_dim,
371
+ ],
372
+ dim=2,
373
+ )
374
+
375
+ # Flash attention requires the input to have the shape
376
+ # batch_size x seq_length x head_dim x hidden_dim
377
+ # therefore we just need to keep the original shape
378
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
379
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
380
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
381
+
382
+ cos, sin = self.rotary_emb(value_states, position_ids)
383
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
384
+
385
+ past_key_value = getattr(self, "past_key_value", past_key_value)
386
+
387
+ if past_key_value is not None:
388
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
389
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
390
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
391
+
392
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout
393
+ # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
394
+ # to be able to avoid many of these transpose/reshape/view.
395
+ query_states = query_states.transpose(1, 2)
396
+ key_states = key_states.transpose(1, 2)
397
+ value_states = value_states.transpose(1, 2)
398
+
399
+ dropout_rate = self.attn_pdrop if self.training else 0.0
400
+
401
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
402
+ # therefore the input hidden states get silently cast to float32. Hence, we need
403
+ # to cast them back to the correct dtype just to be sure everything works as expected.
404
+ # This might slow down training & inference so it is recommended to not cast the LayerNorms
405
+ # in fp32. (LlamaRMSNorm handles it correctly)
406
+ input_dtype = query_states.dtype
407
+ if input_dtype == torch.float32:
408
+ if torch.is_autocast_enabled():
409
+ target_dtype = torch.get_autocast_gpu_dtype()
410
+ # Handle the case where the model is quantized
411
+ elif hasattr(self.config, "_pre_quantization_dtype"):
412
+ target_dtype = self.config._pre_quantization_dtype
413
+ else:
414
+ target_dtype = query_states.dtype
415
+
416
+ logger.warning_once(
417
+ "The input hidden states seems to be silently casted in float32, this might be "
418
+ + "related to the fact you have upcasted embedding or layer norm layers in "
419
+ + f"float32. We will cast back the input in {target_dtype}."
420
+ )
421
+
422
+ query_states = query_states.to(target_dtype)
423
+ key_states = key_states.to(target_dtype)
424
+ value_states = value_states.to(target_dtype)
425
+
426
+ attn_output = self._flash_attention_forward(
427
+ query_states,
428
+ key_states,
429
+ value_states,
430
+ attention_mask,
431
+ q_len,
432
+ dropout=dropout_rate,
433
+ )
434
+
435
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
436
+ attn_output = self.out_proj(attn_output)
437
+
438
+ if not output_attentions:
439
+ attn_weights = None
440
+
441
+ return attn_output, attn_weights, past_key_value
442
+
443
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
444
+ def _flash_attention_forward(
445
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
446
+ ):
447
+ """
448
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
450
+ first unpad the input, then compute the attention scores and pad the final attention scores.
450
+
451
+ Args:
452
+ query_states (`torch.Tensor`):
453
+ Input query states to be passed to Flash Attention API
454
+ key_states (`torch.Tensor`):
455
+ Input key states to be passed to Flash Attention API
456
+ value_states (`torch.Tensor`):
457
+ Input value states to be passed to Flash Attention API
458
+ attention_mask (`torch.Tensor`):
459
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
460
+ position of padding tokens and 1 for the position of non-padding tokens.
461
+ dropout (`float`):
462
+ Attention dropout
463
+ softmax_scale (`float`, *optional*):
464
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
465
+ """
466
+ if not self._flash_attn_uses_top_left_mask:
467
+ causal = self.is_causal
468
+ else:
469
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
470
+ causal = self.is_causal and query_length != 1
471
+
472
+ # Contains at least one padding token in the sequence
473
+ if attention_mask is not None:
474
+ batch_size = query_states.shape[0]
475
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
476
+ query_states, key_states, value_states, attention_mask, query_length
477
+ )
478
+
479
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
480
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
481
+
482
+ attn_output_unpad = flash_attn_varlen_func(
483
+ query_states,
484
+ key_states,
485
+ value_states,
486
+ cu_seqlens_q=cu_seqlens_q,
487
+ cu_seqlens_k=cu_seqlens_k,
488
+ max_seqlen_q=max_seqlen_in_batch_q,
489
+ max_seqlen_k=max_seqlen_in_batch_k,
490
+ dropout_p=dropout,
491
+ softmax_scale=softmax_scale,
492
+ causal=causal,
493
+ )
494
+
495
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
496
+ else:
497
+ attn_output = flash_attn_func(
498
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
499
+ )
500
+
501
+ return attn_output
502
+
503
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
504
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
505
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
506
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
507
+
508
+ key_layer = index_first_axis(
509
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
510
+ )
511
+ value_layer = index_first_axis(
512
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
513
+ )
514
+ if query_length == kv_seq_len:
515
+ query_layer = index_first_axis(
516
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
517
+ )
518
+ cu_seqlens_q = cu_seqlens_k
519
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
520
+ indices_q = indices_k
521
+ elif query_length == 1:
522
+ max_seqlen_in_batch_q = 1
523
+ cu_seqlens_q = torch.arange(
524
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
525
+ ) # There is a memcpy here, that is very bad.
526
+ indices_q = cu_seqlens_q[:-1]
527
+ query_layer = query_layer.squeeze(1)
528
+ else:
529
+ # The -q_len: slice assumes left padding.
530
+ attention_mask = attention_mask[:, -query_length:]
531
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
532
+
533
+ return (
534
+ query_layer,
535
+ key_layer,
536
+ value_layer,
537
+ indices_q,
538
+ (cu_seqlens_q, cu_seqlens_k),
539
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
540
+ )
541
+
542
+
543
+ class DbrxSdpaAttention(DbrxAttention):
544
+ """
545
+ Dbrx attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
546
+ `DbrxAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
547
+ the SDPA API.
548
+ """
549
+
550
+ def forward(
551
+ self,
552
+ hidden_states: torch.Tensor,
553
+ attention_mask: Optional[torch.Tensor] = None,
554
+ position_ids: Optional[torch.LongTensor] = None,
555
+ past_key_value: Optional[Cache] = None,
556
+ output_attentions: bool = False,
557
+ use_cache: bool = False,
558
+ cache_position: Optional[torch.LongTensor] = None,
559
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
560
+ if output_attentions:
561
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
562
+ logger.warning_once(
563
+ "DbrxModel is using DbrxSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
564
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
565
+ )
566
+ return super().forward(
567
+ hidden_states=hidden_states,
568
+ attention_mask=attention_mask,
569
+ position_ids=position_ids,
570
+ past_key_value=past_key_value,
571
+ output_attentions=output_attentions,
572
+ use_cache=use_cache,
573
+ cache_position=cache_position,
574
+ )
575
+
576
+ bsz, q_len, _ = hidden_states.size()
577
+
578
+ qkv_states = self.Wqkv(hidden_states)
579
+ if self.clip_qkv is not None:
580
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
581
+
582
+ query_states, key_states, value_states = qkv_states.split(
583
+ [
584
+ self.hidden_size,
585
+ self.num_key_value_heads * self.head_dim,
586
+ self.num_key_value_heads * self.head_dim,
587
+ ],
588
+ dim=2,
589
+ )
590
+
591
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
592
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
593
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
594
+
595
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
596
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
597
+
598
+ past_key_value = getattr(self, "past_key_value", past_key_value)
599
+
600
+ if past_key_value is not None:
601
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
602
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
603
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
604
+
605
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
606
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
607
+
608
+ causal_mask = attention_mask
609
+ if attention_mask is not None:
610
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
611
+
612
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
613
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
614
+ if query_states.device.type == "cuda" and causal_mask is not None:
615
+ query_states = query_states.contiguous()
616
+ key_states = key_states.contiguous()
617
+ value_states = value_states.contiguous()
618
+
619
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
620
+ query_states,
621
+ key_states,
622
+ value_states,
623
+ attn_mask=causal_mask,
624
+ dropout_p=self.attn_pdrop if self.training else 0.0,
625
+ )
626
+
627
+ attn_output = attn_output.transpose(1, 2).contiguous()
628
+ attn_output = attn_output.view(bsz, q_len, -1)
629
+
630
+ attn_output = self.out_proj(attn_output)
631
+
632
+ return attn_output, None, past_key_value
633
+
634
+
635
+ DBRX_ATTENTION_CLASSES = {
636
+ "eager": DbrxAttention,
637
+ "flash_attention_2": DbrxFlashAttention2,
638
+ "sdpa": DbrxSdpaAttention,
639
+ }
640
+
641
+
642
+ class DbrxNormAttentionNorm(nn.Module):
643
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
644
+ super().__init__()
645
+ self.block_idx = block_idx
646
+ self.resid_pdrop = config.resid_pdrop
647
+ self.norm_1 = nn.LayerNorm(config.d_model, bias=False)
648
+ self.attn = DBRX_ATTENTION_CLASSES[config._attn_implementation](
649
+ config=config,
650
+ block_idx=block_idx,
651
+ )
652
+ self.norm_2 = nn.LayerNorm(config.d_model, bias=False)
653
+
654
+ def forward(
655
+ self,
656
+ hidden_states: torch.Tensor,
657
+ position_ids: torch.LongTensor,
658
+ attention_mask: Optional[torch.Tensor] = None,
659
+ past_key_value: Optional[Cache] = None,
660
+ output_attentions: bool = False,
661
+ use_cache: bool = False,
662
+ cache_position: Optional[torch.LongTensor] = None,
663
+ **kwargs: Any,
664
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
665
+ residual_states = hidden_states
666
+ hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype)
667
+
668
+ hidden_states, attn_weights, past_key_value = self.attn(
669
+ hidden_states=hidden_states,
670
+ attention_mask=attention_mask,
671
+ position_ids=position_ids,
672
+ past_key_value=past_key_value,
673
+ output_attentions=output_attentions,
674
+ use_cache=use_cache,
675
+ cache_position=cache_position,
676
+ **kwargs,
677
+ )
678
+
679
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
680
+ hidden_states = hidden_states + residual_states
681
+
682
+ residual_states = hidden_states
683
+ hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype)
684
+
685
+ return residual_states, hidden_states, attn_weights, past_key_value
686
+
687
+
688
+ class DbrxRouter(nn.Module):
689
+ def __init__(
690
+ self,
691
+ hidden_size: int,
692
+ moe_num_experts: int,
693
+ moe_top_k: int,
694
+ moe_jitter_eps: Optional[float],
695
+ moe_normalize_expert_weights: Optional[float],
696
+ ):
697
+ super().__init__()
698
+ self.hidden_size = hidden_size
699
+ self.moe_num_experts = moe_num_experts
700
+ self.moe_top_k = moe_top_k
701
+ self.moe_jitter_eps = moe_jitter_eps
702
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
703
+
704
+ self.layer = nn.Linear(self.hidden_size, self.moe_num_experts, bias=False)
705
+
706
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
707
+ if self.training and self.moe_jitter_eps is not None:
708
+ hidden_states *= torch.empty_like(hidden_states).uniform_(
709
+ 1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps
710
+ )
711
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
712
+ weights = self.layer(hidden_states).softmax(dim=-1, dtype=torch.float32)
713
+ top_weights, top_experts = torch.topk(weights, self.moe_top_k, dim=-1)
714
+
715
+ top_weights_scale = (
716
+ torch.norm(top_weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True)
717
+ if self.moe_normalize_expert_weights is not None
718
+ else 1.0
719
+ )
720
+ top_weights = top_weights / top_weights_scale
721
+
722
+ weights = weights.to(hidden_states.dtype)
723
+ top_weights = top_weights.to(hidden_states.dtype)
724
+ return weights, top_weights, top_experts
725
+
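+ # Editor's note: a minimal shape sketch for `DbrxRouter` (not part of the original sources;
+ # the sizes are made up). Tokens are flattened to (batch * seq, hidden) before routing:
+ #
+ #     router = DbrxRouter(hidden_size=8, moe_num_experts=4, moe_top_k=2,
+ #                         moe_jitter_eps=None, moe_normalize_expert_weights=1.0)
+ #     weights, top_weights, top_experts = router(torch.randn(2, 5, 8))
+ #     # weights: (10, 4) softmax over all experts (used for the aux loss)
+ #     # top_weights: (10, 2) top-k weights, renormalized to sum to 1 (p=1 norm)
+ #     # top_experts: (10, 2) indices of the selected experts per token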
726
+
727
+ class DbrxExpertGLU(nn.Module):
728
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
729
+ super().__init__()
730
+ self.hidden_size = hidden_size
731
+ self.ffn_hidden_size = ffn_hidden_size
732
+ self.moe_num_experts = moe_num_experts
733
+
734
+ self.w1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
735
+ self.v1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
736
+ self.w2 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
737
+
738
+ act_fn_name = ffn_act_fn.get("name", "silu")
739
+ self.activation_fn = ACT2FN[act_fn_name]
740
+
741
+ def forward(
742
+ self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor
743
+ ) -> torch.Tensor:
744
+ gate_proj = x.matmul(expert_w1.t())
745
+ up_proj = x.matmul(expert_v1.t())
746
+ gate_proj = self.activation_fn(gate_proj)
747
+ intermediate_states = gate_proj * up_proj
748
+ down_proj = intermediate_states.matmul(expert_w2)
749
+ return down_proj
750
+
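+ # Editor's note (not part of the original sources): per expert e the forward above computes a
+ # gated linear unit,
+ #     y = (act(x @ W1_e.T) * (x @ V1_e.T)) @ W2_e
+ # where W1_e, V1_e and W2_e are the (ffn_hidden_size, hidden_size) slices of the stacked w1, v1
+ # and w2 parameters; the final matmul uses W2_e un-transposed because, with that layout, it
+ # already maps the ffn_hidden_size activations back to hidden_size.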
751
+
752
+ class DbrxExperts(nn.Module):
753
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
754
+ super().__init__()
755
+ self.moe_num_experts = moe_num_experts
756
+ self.mlp = DbrxExpertGLU(
757
+ hidden_size=hidden_size,
758
+ ffn_hidden_size=ffn_hidden_size,
759
+ moe_num_experts=moe_num_experts,
760
+ ffn_act_fn=ffn_act_fn,
761
+ )
762
+
763
+ def forward(
764
+ self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor
765
+ ) -> torch.Tensor:
766
+ bsz, q_len, hidden_size = x.shape
767
+ x = x.view(-1, hidden_size)
768
+ out = torch.zeros_like(x)
769
+
770
+ expert_mask = nn.functional.one_hot(top_experts, num_classes=self.moe_num_experts).permute(2, 1, 0)
771
+ # Chunk experts at once to avoid storing full parameter multiple times in autograd
772
+ w1_chunked = self.mlp.w1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
773
+ self.moe_num_experts, dim=0
774
+ )
775
+ v1_chunked = self.mlp.v1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
776
+ self.moe_num_experts, dim=0
777
+ )
778
+ w2_chunked = self.mlp.w2.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
779
+ self.moe_num_experts, dim=0
780
+ )
781
+ w1_chunked = [w1.squeeze(dim=0) for w1 in w1_chunked]
782
+ v1_chunked = [v1.squeeze(dim=0) for v1 in v1_chunked]
783
+ w2_chunked = [w2.squeeze(dim=0) for w2 in w2_chunked]
784
+ for expert_idx in range(0, self.moe_num_experts):
785
+ topk_idx, token_idx = torch.where(expert_mask[expert_idx])
786
+ if token_idx.shape[0] == 0:
787
+ continue
788
+
789
+ token_list = token_idx
790
+ topk_list = topk_idx
791
+
792
+ expert_tokens = x[None, token_list].reshape(-1, hidden_size)
793
+ expert_out = (
794
+ self.mlp(expert_tokens, w1_chunked[expert_idx], v1_chunked[expert_idx], w2_chunked[expert_idx])
795
+ * top_weights[token_list, topk_list, None]
796
+ )
797
+
798
+ out.index_add_(0, token_idx, expert_out)
799
+
800
+ out = out.reshape(bsz, q_len, hidden_size)
801
+ return out
802
+
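+ # Editor's note: an illustrative end-to-end shape check for the router + experts pair (not part
+ # of the original sources; the parameters are uninitialized here, so only the shapes are
+ # meaningful):
+ #
+ #     experts = DbrxExperts(hidden_size=8, ffn_hidden_size=16, moe_num_experts=4,
+ #                           ffn_act_fn={"name": "silu"})
+ #     router = DbrxRouter(8, 4, 2, None, 1.0)
+ #     x = torch.randn(2, 5, 8)
+ #     weights, top_weights, top_experts = router(x)
+ #     out = experts(x, weights, top_weights, top_experts)    # out.shape == (2, 5, 8)
+ #
+ # For each expert, `torch.where(expert_mask[expert_idx])` selects the flattened tokens (and
+ # their top-k slot) routed to it; the expert output is scaled by the matching routing weight and
+ # scattered back with `index_add_`, so `out` is the weighted sum of the top-k expert outputs per token.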
803
+
804
+ class DbrxFFN(nn.Module):
805
+ def __init__(self, config: DbrxConfig):
806
+ super().__init__()
807
+
808
+ ffn_config = config.ffn_config
809
+ self.router = DbrxRouter(
810
+ hidden_size=config.d_model,
811
+ moe_num_experts=ffn_config.moe_num_experts,
812
+ moe_top_k=ffn_config.moe_top_k,
813
+ moe_jitter_eps=ffn_config.moe_jitter_eps,
814
+ moe_normalize_expert_weights=ffn_config.moe_normalize_expert_weights,
815
+ )
816
+
817
+ self.experts = DbrxExperts(
818
+ hidden_size=config.d_model,
819
+ ffn_hidden_size=ffn_config.ffn_hidden_size,
820
+ moe_num_experts=ffn_config.moe_num_experts,
821
+ ffn_act_fn=ffn_config.ffn_act_fn,
822
+ )
823
+
824
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
825
+ weights, top_weights, top_experts = self.router(x)
826
+ out = self.experts(x, weights, top_weights, top_experts)
827
+ return out, weights
828
+
829
+
830
+ class DbrxBlock(nn.Module):
831
+ def __init__(self, config: DbrxConfig, block_idx: int):
832
+ super().__init__()
833
+ self.hidden_size = config.d_model
834
+ self.resid_pdrop = config.resid_pdrop
835
+ self.block_idx = block_idx
836
+ self.norm_attn_norm = DbrxNormAttentionNorm(
837
+ config=config,
838
+ block_idx=block_idx,
839
+ )
840
+ self.ffn = DbrxFFN(config=config)
841
+
842
+ def forward(
843
+ self,
844
+ hidden_states: torch.Tensor,
845
+ attention_mask: Optional[torch.Tensor] = None,
846
+ position_ids: torch.LongTensor = None,
847
+ past_key_value: Optional[Cache] = None,
848
+ output_attentions: Optional[bool] = False,
849
+ output_router_logits: Optional[bool] = False,
850
+ use_cache: Optional[bool] = False,
851
+ cache_position: Optional[torch.LongTensor] = None,
852
+ **kwargs: Any,
853
+ ) -> Union[
854
+ Tuple[torch.Tensor],
855
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
856
+ Tuple[torch.Tensor, Optional[Cache]],
857
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]],
858
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]],
859
+ Tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]],
860
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]],
861
+ ]:
862
+ """Forward function for DbrxBlock.
863
+
864
+ Args:
865
+ hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
866
+ position_ids (`torch.LongTensor`): position ids of shape `(batch, seq_len)`
867
+ attention_mask (`torch.Tensor`, optional): attention mask of size (batch_size, sequence_length)
868
+ if flash attention is used or (batch_size, 1, query_sequence_length, key_sequence_length)
869
+ if default attention is used.
870
+ past_key_value (`Tuple(torch.Tensor)`, optional): cached past key and value projection states
871
+ output_attentions (`bool`, optional): Whether or not to return the attentions tensors of all
872
+ attention layers. See `attentions` under returned tensors for more detail.
873
+ output_router_logits (`bool`, optional): Whether or not to return the router logits.
874
+ use_cache (`bool`, optional): If set to `True`, `past_key_values` key value states are
875
+ returned and can be used to speed up decoding (see `past_key_values`).
876
+ cache_position (`torch.LongTensor`, optional): position ids of the cache
877
+ """
878
+
879
+ # Norm + Attention + Norm
880
+ resid_states, hidden_states, self_attn_weights, present_key_value = self.norm_attn_norm(
881
+ hidden_states=hidden_states,
882
+ attention_mask=attention_mask,
883
+ position_ids=position_ids,
884
+ past_key_value=past_key_value,
885
+ output_attentions=output_attentions,
886
+ use_cache=use_cache,
887
+ cache_position=cache_position,
888
+ **kwargs,
889
+ )
890
+
891
+ # Fully Connected
892
+ hidden_states, router_logits = self.ffn(hidden_states)
893
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
894
+ hidden_states = resid_states + hidden_states
895
+
896
+ outputs = (hidden_states,)
897
+
898
+ if output_attentions:
899
+ outputs += (self_attn_weights,)
900
+
901
+ if use_cache:
902
+ outputs += (present_key_value,)
903
+
904
+ if output_router_logits:
905
+ outputs += (router_logits,)
906
+
907
+ return outputs
908
+
909
+
910
+ DBRX_START_DOCSTRING = r"""
911
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
912
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
913
+ etc.)
914
+
915
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
916
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
917
+ and behavior.
918
+
919
+ Parameters:
920
+ config ([`DbrxConfig`]):
921
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
922
+ load the weights associated with the model, only the configuration. Check out the
923
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
924
+ """
925
+
926
+
927
+ @add_start_docstrings(
928
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
929
+ DBRX_START_DOCSTRING,
930
+ )
931
+ class DbrxPreTrainedModel(PreTrainedModel):
932
+ config_class = DbrxConfig
933
+ base_model_prefix = "transformer"
934
+ supports_gradient_checkpointing = True
935
+ _no_split_modules = ["DbrxBlock"]
936
+ _skip_keys_device_placement = ["past_key_values"]
937
+ _supports_flash_attn_2 = True
938
+ _supports_sdpa = True
939
+ _supports_cache_class = True
940
+
941
+ def _init_weights(self, module: nn.Module):
942
+ std = self.config.initializer_range
943
+ if isinstance(module, nn.Linear):
944
+ module.weight.data.normal_(mean=0.0, std=std)
945
+ if module.bias is not None:
946
+ module.bias.data.zero_()
947
+ elif isinstance(module, nn.Embedding):
948
+ module.weight.data.normal_(mean=0.0, std=std)
949
+ if module.padding_idx is not None:
950
+ module.weight.data[module.padding_idx].zero_()
951
+ elif isinstance(module, nn.LayerNorm):
952
+ module.weight.data.normal_(mean=0.0, std=std)
953
+ if module.bias is not None:
954
+ module.bias.data.zero_()
955
+ elif isinstance(module, DbrxExpertGLU):
956
+ module.w1.data.normal_(mean=0.0, std=std)
957
+ module.v1.data.normal_(mean=0.0, std=std)
958
+ module.w2.data.normal_(mean=0.0, std=std)
959
+
960
+ def _setup_cache(self, cache_cls: Any, max_batch_size: int, max_cache_len: int):
961
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
962
+ raise ValueError(
963
+ "`static` cache implementation is not compatible with "
964
+ + "`attn_implementation==flash_attention_2`. Make sure to use "
965
+ + "`spda` in the mean time and open an issue at https://github.com/huggingface/transformers."
966
+ )
967
+
968
+ for block in self.transformer.blocks:
969
+ device = block.norm_attn_norm.norm_1.weight.device
970
+ if hasattr(self.config, "_pre_quantization_dtype"):
971
+ dtype = self.config._pre_quantization_dtype
972
+ else:
973
+ dtype = block.norm_attn_norm.attn.out_proj.weight.dtype
974
+ block.norm_attn_norm.attn.past_key_value = cache_cls(
975
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
976
+ )
977
+
978
+ def _reset_cache(self):
979
+ for block in self.transformer.blocks:
980
+ block.norm_attn_norm.attn.past_key_value = None
981
+
982
+
983
+ DBRX_INPUTS_DOCSTRING = r"""
984
+ Args:
985
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
986
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
987
+ it.
988
+
989
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
990
+ [`PreTrainedTokenizer.__call__`] for details.
991
+
992
+ [What are input IDs?](../glossary#input-ids)
993
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
994
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
995
+
996
+ - 1 for tokens that are **not masked**,
997
+ - 0 for tokens that are **masked**.
998
+
999
+ [What are attention masks?](../glossary#attention-mask)
1000
+
1001
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1002
+ [`PreTrainedTokenizer.__call__`] for details.
1003
+
1004
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
1005
+ `past_key_values`).
1006
+
1007
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1008
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1009
+ information on the default strategy.
1010
+
1011
+ - 1 indicates the head is **not masked**,
1012
+ - 0 indicates the head is **masked**.
1013
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1014
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1015
+ config.n_positions - 1]`.
1016
+
1017
+ [What are position IDs?](../glossary#position-ids)
1018
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1019
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1020
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1021
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1022
+
1023
+ Two formats are allowed:
1024
+ - a [`~cache_utils.Cache`] instance;
1025
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1026
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1027
+ cache format.
1028
+
1029
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1030
+ legacy cache format will be returned.
1031
+
1032
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1033
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1034
+ of shape `(batch_size, sequence_length)`.
1035
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1036
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1037
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1038
+ model's internal embedding lookup matrix.
1039
+ use_cache (`bool`, *optional*):
1040
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1041
+ `past_key_values`).
1042
+ output_attentions (`bool`, *optional*):
1043
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1044
+ tensors for more detail.
1045
+ output_hidden_states (`bool`, *optional*):
1046
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1047
+ more detail.
1048
+ output_router_logits (`bool`, *optional*):
1049
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
1050
+ should not be returned during inference.
1051
+ return_dict (`bool`, *optional*):
1052
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1053
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1054
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1055
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1056
+ the complete sequence length.
1057
+ """
1058
+
1059
+
1060
+ @add_start_docstrings(
1061
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
1062
+ DBRX_START_DOCSTRING,
1063
+ )
1064
+ class DbrxModel(DbrxPreTrainedModel):
1065
+ """Transformer decoder consisting of *config.num_hidden_layers*. Each layer is a [`DbrxBlock`] layer.
1066
+
1067
+ Args:
1068
+ config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
1069
+ Initializing with a config file does not load the weights associated with the model, only the
1070
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1071
+ """
1072
+
1073
+ def __init__(self, config: DbrxConfig):
1074
+ super().__init__(config)
1075
+ self.padding_idx = config.pad_token_id
1076
+ self.vocab_size = config.vocab_size
1077
+ self.emb_pdrop = config.emb_pdrop
1078
+
1079
+ self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
1080
+ self.blocks = nn.ModuleList([DbrxBlock(config, block_idx) for block_idx in range(config.n_layers)])
1081
+ self.norm_f = nn.LayerNorm(config.d_model, bias=False)
1082
+ self.gradient_checkpointing = False
1083
+
1084
+ # Initialize weights and apply final processing
1085
+ self.post_init()
1086
+
1087
+ def get_input_embeddings(self) -> nn.Embedding:
1088
+ return self.wte
1089
+
1090
+ def set_input_embeddings(self, value: nn.Embedding):
1091
+ self.wte = value
1092
+
1093
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
1094
+ def forward(
1095
+ self,
1096
+ input_ids: Optional[torch.LongTensor] = None,
1097
+ attention_mask: Optional[torch.Tensor] = None,
1098
+ position_ids: Optional[torch.LongTensor] = None,
1099
+ past_key_values: Optional[Cache] = None,
1100
+ inputs_embeds: Optional[torch.Tensor] = None,
1101
+ use_cache: Optional[bool] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ output_router_logits: Optional[bool] = None,
1105
+ return_dict: Optional[bool] = None,
1106
+ cache_position: Optional[torch.LongTensor] = None,
1107
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
1108
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1109
+ output_hidden_states = (
1110
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1111
+ )
1112
+ output_router_logits = (
1113
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1114
+ )
1115
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1116
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1117
+
1118
+ if (input_ids is None) ^ (inputs_embeds is not None):
1119
+ raise ValueError(
1120
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1121
+ )
1122
+
1123
+ if self.gradient_checkpointing and self.training and use_cache:
1124
+ logger.warning_once(
1125
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1126
+ )
1127
+ use_cache = False
1128
+
1129
+ if inputs_embeds is None:
1130
+ inputs_embeds = self.wte(input_ids)
1131
+
1132
+ inputs_embeds = nn.functional.dropout(inputs_embeds, p=self.emb_pdrop, training=self.training)
1133
+
1134
+ past_seen_tokens = 0
1135
+ if use_cache: # kept for BC (cache positions)
1136
+ if not isinstance(past_key_values, StaticCache):
1137
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1138
+ past_seen_tokens = past_key_values.get_seq_length()
1139
+
1140
+ if cache_position is None:
1141
+ if isinstance(past_key_values, StaticCache):
1142
+ raise ValueError("cache_position is a required argument when using StaticCache.")
1143
+ cache_position = torch.arange(
1144
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1145
+ )
1146
+
1147
+ if position_ids is None:
1148
+ position_ids = cache_position.unsqueeze(0)
1149
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
1150
+
1151
+ # embed positions
1152
+ hidden_states = inputs_embeds
1153
+
1154
+ # decoder layers
1155
+ all_hidden_states = () if output_hidden_states else None
1156
+ all_self_attns = () if output_attentions else None
1157
+ all_router_logits = () if output_router_logits else None
1158
+ next_decoder_cache = None
1159
+
1160
+ for block in self.blocks:
1161
+ if output_hidden_states:
1162
+ all_hidden_states += (hidden_states,)
1163
+
1164
+ if self.gradient_checkpointing and self.training:
1165
+ block_outputs = self._gradient_checkpointing_func(
1166
+ block.__call__,
1167
+ hidden_states,
1168
+ causal_mask,
1169
+ position_ids,
1170
+ past_key_values,
1171
+ output_attentions,
1172
+ output_router_logits,
1173
+ use_cache,
1174
+ cache_position,
1175
+ )
1176
+ else:
1177
+ block_outputs = block(
1178
+ hidden_states,
1179
+ attention_mask=causal_mask,
1180
+ position_ids=position_ids,
1181
+ past_key_value=past_key_values,
1182
+ output_attentions=output_attentions,
1183
+ output_router_logits=output_router_logits,
1184
+ use_cache=use_cache,
1185
+ cache_position=cache_position,
1186
+ )
1187
+
1188
+ hidden_states = block_outputs[0]
1189
+
1190
+ if use_cache:
1191
+ next_decoder_cache = block_outputs[2 if output_attentions else 1]
1192
+
1193
+ if output_attentions:
1194
+ all_self_attns += (block_outputs[1],)
1195
+
1196
+ if output_router_logits:
1197
+ all_router_logits += (block_outputs[-1],)
1198
+
1199
+ hidden_states = self.norm_f(hidden_states)
1200
+
1201
+ # add hidden states from the last decoder layer
1202
+ if output_hidden_states:
1203
+ all_hidden_states += (hidden_states,)
1204
+
1205
+ next_cache = None
1206
+ if use_cache:
1207
+ next_cache = (
1208
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
1209
+ )
1210
+ if not return_dict:
1211
+ return tuple(
1212
+ v
1213
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
1214
+ if v is not None
1215
+ )
1216
+ return MoeModelOutputWithPast(
1217
+ last_hidden_state=hidden_states,
1218
+ past_key_values=next_cache,
1219
+ hidden_states=all_hidden_states,
1220
+ attentions=all_self_attns,
1221
+ router_logits=all_router_logits,
1222
+ )
1223
+
1224
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1225
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes.
1226
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1227
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1228
+ def _update_causal_mask(
1229
+ self, attention_mask: Optional[torch.Tensor], input_tensor: torch.Tensor, cache_position: torch.Tensor
1230
+ ) -> Optional[torch.Tensor]:
1231
+ if self.config._attn_implementation == "flash_attention_2":
1232
+ if attention_mask is not None and 0.0 in attention_mask:
1233
+ return attention_mask
1234
+ return None
1235
+
1236
+ dtype, device = input_tensor.dtype, input_tensor.device
1237
+ min_dtype = torch.finfo(dtype).min
1238
+ sequence_length = input_tensor.shape[1]
1239
+ if hasattr(self.blocks[0].norm_attn_norm.attn, "past_key_value"): # static cache
1240
+ target_length = self.config.max_position_embeddings
1241
+ else: # dynamic cache
1242
+ target_length = (
1243
+ attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[-1] + 1
1244
+ )
1245
+ target_length = int(target_length)
1246
+
1247
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
1248
+ if sequence_length != 1:
1249
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1250
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1251
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1252
+ if attention_mask is not None:
1253
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1254
+ if attention_mask.dim() == 2:
1255
+ mask_length = attention_mask.shape[-1]
1256
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1257
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1258
+ elif attention_mask.dim() == 4:
1259
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
1260
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
1261
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
1262
+ offset = cache_position[0]
1263
+ else:
1264
+ offset = 0
1265
+ mask_shape = attention_mask.shape
1266
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
1267
+ causal_mask[
1268
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
1269
+ ] = mask_slice
1270
+
1271
+ if (
1272
+ self.config._attn_implementation == "sdpa"
1273
+ and attention_mask is not None
1274
+ and attention_mask.device.type == "cuda"
1275
+ ):
1276
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
1277
+ is_tracing = (
1278
+ torch.jit.is_tracing()
1279
+ or isinstance(input_tensor, torch.fx.Proxy)
1280
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
1281
+ )
1282
+ if not is_tracing and torch.any(attention_mask != 1):
1283
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1284
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1285
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1286
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1287
+
1288
+ return causal_mask
1289
+
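+ # Editor's note (not part of the original sources): the mask built above is additive. Allowed
+ # positions hold 0 and disallowed positions hold the most negative finite value of the compute
+ # dtype, so adding it to the attention scores before the softmax suppresses future (and padded)
+ # tokens. For a prompt of length 3 with no padding, the (seq, seq) block looks like:
+ #
+ #     [[0, min, min],
+ #      [0,   0, min],
+ #      [0,   0,   0]]      # min = torch.finfo(dtype).min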
1290
+
1291
+ @add_start_docstrings("The DBRX Model transformer for causal language modeling.", DBRX_START_DOCSTRING)
1292
+ class DbrxForCausalLM(DbrxPreTrainedModel):
1293
+ def __init__(self, config: DbrxConfig):
1294
+ super().__init__(config)
1295
+ self.transformer = DbrxModel(config)
1296
+ self.vocab_size = config.vocab_size
1297
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1298
+ self.moe_loss_weight = config.ffn_config.moe_loss_weight
1299
+ self.num_experts = config.ffn_config.moe_num_experts
1300
+ self.num_experts_per_tok = config.ffn_config.moe_top_k
1301
+
1302
+ # Initialize weights and apply final processing
1303
+ self.post_init()
1304
+
1305
+ def get_input_embeddings(self) -> nn.Embedding:
1306
+ return self.transformer.get_input_embeddings()
1307
+
1308
+ def set_input_embeddings(self, value: nn.Embedding):
1309
+ self.transformer.set_input_embeddings(value)
1310
+
1311
+ def get_output_embeddings(self) -> nn.Linear:
1312
+ return self.lm_head
1313
+
1314
+ def set_output_embeddings(self, new_embeddings: nn.Linear):
1315
+ self.lm_head = new_embeddings
1316
+
1317
+ def set_decoder(self, decoder: DbrxModel):
1318
+ self.transformer = decoder
1319
+
1320
+ def get_decoder(self) -> DbrxModel:
1321
+ return self.transformer
1322
+
1323
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
1324
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1325
+ def forward(
1326
+ self,
1327
+ input_ids: Optional[torch.LongTensor] = None,
1328
+ attention_mask: Optional[torch.Tensor] = None,
1329
+ position_ids: Optional[torch.LongTensor] = None,
1330
+ past_key_values: Optional[Cache] = None,
1331
+ inputs_embeds: Optional[torch.Tensor] = None,
1332
+ labels: Optional[torch.LongTensor] = None,
1333
+ use_cache: Optional[bool] = None,
1334
+ output_attentions: Optional[bool] = None,
1335
+ output_hidden_states: Optional[bool] = None,
1336
+ output_router_logits: Optional[bool] = None,
1337
+ return_dict: Optional[bool] = None,
1338
+ cache_position: Optional[torch.LongTensor] = None,
1339
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
1340
+ r"""Forward function for causal language modeling.
1341
+
1342
+ Args:
1343
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1344
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1345
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1346
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1347
+
1348
+ Returns:
1349
+
1350
+ Example:
1351
+
1352
+ ```python
1353
+ >>> from transformers import AutoTokenizer, DbrxForCausalLM
1354
+
1355
+ >>> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
1356
+ >>> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
1357
+
1358
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1359
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1360
+
1361
+ >>> # Generate
1362
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1363
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1364
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1365
+ ```
1366
+ """
1367
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1368
+ output_hidden_states = (
1369
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1370
+ )
1371
+ output_router_logits = (
1372
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
1373
+ )
1374
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1375
+
1376
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1377
+ outputs = self.transformer(
1378
+ input_ids=input_ids,
1379
+ attention_mask=attention_mask,
1380
+ position_ids=position_ids,
1381
+ past_key_values=past_key_values,
1382
+ inputs_embeds=inputs_embeds,
1383
+ use_cache=use_cache,
1384
+ output_attentions=output_attentions,
1385
+ output_hidden_states=output_hidden_states,
1386
+ output_router_logits=output_router_logits,
1387
+ return_dict=return_dict,
1388
+ cache_position=cache_position,
1389
+ )
1390
+
1391
+ hidden_states = outputs[0]
1392
+ logits = self.lm_head(hidden_states)
1393
+
1394
+ loss = None
1395
+ if labels is not None:
1396
+ # Shift so that tokens < n predict n
1397
+ shift_logits = logits[..., :-1, :].contiguous()
1398
+ shift_labels = labels[..., 1:].contiguous()
1399
+ # Flatten the tokens
1400
+ loss_fct = nn.CrossEntropyLoss()
1401
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1402
+ shift_labels = shift_labels.view(-1)
1403
+ # Enable model parallelism
1404
+ shift_labels = shift_labels.to(shift_logits.device)
1405
+ loss = loss_fct(shift_logits, shift_labels)
1406
+
1407
+ aux_loss = None
1408
+ if output_router_logits:
1409
+ aux_loss = load_balancing_loss_func(
1410
+ outputs.router_logits if return_dict else outputs[-1],
1411
+ self.num_experts,
1412
+ self.num_experts_per_tok,
1413
+ attention_mask,
1414
+ )
1415
+ if labels is not None and loss is not None:
1416
+ loss += self.moe_loss_weight * aux_loss.to(loss.device)  # make sure aux_loss resides on the same device as loss
1417
+
1418
+ if not return_dict:
1419
+ output = (logits,) + outputs[1:]
1420
+ if output_router_logits:
1421
+ output = (aux_loss,) + output
1422
+ return (loss,) + output if loss is not None else output
1423
+
1424
+ return MoeCausalLMOutputWithPast(
1425
+ loss=loss,
1426
+ aux_loss=aux_loss,
1427
+ logits=logits,
1428
+ past_key_values=outputs.past_key_values,
1429
+ hidden_states=outputs.hidden_states,
1430
+ attentions=outputs.attentions,
1431
+ router_logits=outputs.router_logits,
1432
+ )
1433
+
1434
+ def prepare_inputs_for_generation(
1435
+ self,
1436
+ input_ids: torch.Tensor,
1437
+ past_key_values: Optional[Cache] = None,
1438
+ attention_mask: Optional[torch.Tensor] = None,
1439
+ inputs_embeds: Optional[torch.Tensor] = None,
1440
+ **kwargs: Any,
1441
+ ) -> Dict[str, Any]:
1442
+ past_length = 0
1443
+ if past_key_values is not None:
1444
+ if isinstance(past_key_values, Cache):
1445
+ cache_length = past_key_values.get_seq_length()
1446
+ past_length = past_key_values.seen_tokens
1447
+ max_cache_length = past_key_values.get_max_length()
1448
+ else:
1449
+ cache_length = past_length = past_key_values[0][0].shape[2]
1450
+ max_cache_length = None
1451
+
1452
+ # Keep only the unprocessed tokens:
1453
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1454
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
1455
+ # input)
1456
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1457
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1458
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1459
+ # input_ids based on the past_length.
1460
+ elif past_length < input_ids.shape[1]:
1461
+ input_ids = input_ids[:, past_length:]
1462
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1463
+
1464
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1465
+ if (
1466
+ max_cache_length is not None
1467
+ and attention_mask is not None
1468
+ and cache_length + input_ids.shape[1] > max_cache_length
1469
+ ):
1470
+ attention_mask = attention_mask[:, -max_cache_length:]
1471
+
1472
+ position_ids = kwargs.get("position_ids", None)
1473
+ if attention_mask is not None and position_ids is None:
1474
+ # create position_ids on the fly for batch generation
1475
+ position_ids = attention_mask.long().cumsum(-1) - 1
1476
+ position_ids.masked_fill_(attention_mask == 0, 1)
1477
+ if past_key_values:
1478
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1479
+
1480
+ if self.generation_config.cache_implementation == "static":
1481
+ # generation with static cache
1482
+ cache_position = kwargs.get("cache_position", None)
1483
+ if cache_position is None:
1484
+ past_length = 0
1485
+ else:
1486
+ past_length = cache_position[-1] + 1
1487
+ input_ids = input_ids[:, past_length:]
1488
+ position_ids = position_ids[:, past_length:] if position_ids is not None else None
1489
+
1490
+ # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
1491
+ # same goes for position ids. Could also help with continued generation.
1492
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1493
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1494
+ position_ids = position_ids.contiguous() if position_ids is not None else None
1495
+
1496
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1497
+ if inputs_embeds is not None and past_key_values is None:
1498
+ model_inputs = {"inputs_embeds": inputs_embeds}
1499
+ else:
1500
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1501
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1502
+ # TODO: use `next_tokens` directly instead.
1503
+ model_inputs = {"input_ids": input_ids.contiguous()}
1504
+
1505
+ model_inputs.update(
1506
+ {
1507
+ "position_ids": position_ids,
1508
+ "cache_position": cache_position,
1509
+ "past_key_values": past_key_values,
1510
+ "use_cache": kwargs.get("use_cache"),
1511
+ "attention_mask": attention_mask,
1512
+ }
1513
+ )
1514
+ return model_inputs
1515
+
1516
+ @staticmethod
1517
+ def _reorder_cache(past_key_values: Cache, beam_idx: torch.LongTensor):
1518
+ reordered_past = ()
1519
+ for layer_past in past_key_values:
1520
+ reordered_past += (
1521
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1522
+ )
1523
+ return reordered_past
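
To make the loss combination in the forward pass above concrete, here is a minimal, self-contained sketch (plain PyTorch with hypothetical toy values; it does not call the DBRX classes) of how the router auxiliary loss is weighted by `moe_loss_weight` and added to the language-modeling loss:

```python
import torch
import torch.nn.functional as F

moe_loss_weight = 0.05  # hypothetical value of config.ffn_config.moe_loss_weight
router_logits = torch.tensor([[2.0, -1.0], [1.5, -0.5], [1.0, 0.0]])  # (tokens, experts)

# Simplified, unmasked load-balancing term: compare the fraction of tokens routed to each
# expert (top-1 here) with the mean routing probability per expert.
probs = router_logits.softmax(dim=-1)
top1 = probs.argmax(dim=-1)
tokens_per_expert = F.one_hot(top1, num_classes=2).float().mean(dim=0)
router_prob_per_expert = probs.mean(dim=0)
aux_loss = 2 * torch.sum(tokens_per_expert * router_prob_per_expert)  # num_experts * sum(...)

ce_loss = torch.tensor(2.31)  # stand-in for the shifted cross-entropy loss computed above
total_loss = ce_loss + moe_loss_weight * aux_loss
print(total_loss)
```

In the model itself the auxiliary term comes from `load_balancing_loss_func` over the per-layer router logits; the sketch only mirrors the final weighting step.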
venv/lib/python3.10/site-packages/transformers/models/esm/__init__.py ADDED
@@ -0,0 +1,94 @@
1
+ # Copyright 2022 Facebook and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig"],
21
+ "tokenization_esm": ["EsmTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_esm"] = [
31
+ "ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "EsmForMaskedLM",
33
+ "EsmForSequenceClassification",
34
+ "EsmForTokenClassification",
35
+ "EsmModel",
36
+ "EsmPreTrainedModel",
37
+ ]
38
+ _import_structure["modeling_esmfold"] = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
39
+
40
+ try:
41
+ if not is_tf_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ _import_structure["modeling_tf_esm"] = [
47
+ "TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
48
+ "TFEsmForMaskedLM",
49
+ "TFEsmForSequenceClassification",
50
+ "TFEsmForTokenClassification",
51
+ "TFEsmModel",
52
+ "TFEsmPreTrainedModel",
53
+ ]
54
+
55
+ if TYPE_CHECKING:
56
+ from .configuration_esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig
57
+ from .tokenization_esm import EsmTokenizer
58
+
59
+ try:
60
+ if not is_torch_available():
61
+ raise OptionalDependencyNotAvailable()
62
+ except OptionalDependencyNotAvailable:
63
+ pass
64
+ else:
65
+ from .modeling_esm import (
66
+ ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
67
+ EsmForMaskedLM,
68
+ EsmForSequenceClassification,
69
+ EsmForTokenClassification,
70
+ EsmModel,
71
+ EsmPreTrainedModel,
72
+ )
73
+ from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding
74
+
75
+ try:
76
+ if not is_tf_available():
77
+ raise OptionalDependencyNotAvailable()
78
+ except OptionalDependencyNotAvailable:
79
+ pass
80
+ else:
81
+ from .modeling_tf_esm import (
82
+ TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
83
+ TFEsmForMaskedLM,
84
+ TFEsmForSequenceClassification,
85
+ TFEsmForTokenClassification,
86
+ TFEsmModel,
87
+ TFEsmPreTrainedModel,
88
+ )
89
+
90
+
91
+ else:
92
+ import sys
93
+
94
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
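
A quick usage sketch for the lazy-import pattern above (assuming `transformers` and `torch` are installed, and using a hypothetical tiny config): the heavy modeling module is only imported when one of its symbols is actually resolved, so touching the config alone stays cheap.

```python
import transformers.models.esm as esm  # cheap: the package is a _LazyModule

config = esm.EsmConfig(vocab_size=33, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
model = esm.EsmModel(config)  # resolving EsmModel is what actually imports modeling_esm
print(sum(p.numel() for p in model.parameters()))
```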
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/convert_esm.cpython-310.pyc ADDED
Binary file (9.73 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc ADDED
Binary file (36 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc ADDED
Binary file (66.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc ADDED
Binary file (43.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc ADDED
Binary file (5.36 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py ADDED
@@ -0,0 +1,361 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ESM model configuration"""
16
+
17
+ from dataclasses import asdict, dataclass
18
+ from typing import Optional
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+ # TODO Update this
27
+
28
+ from ..deprecated._archive_maps import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class EsmConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of an [`ESMModel`]. It is used to instantiate an ESM model
34
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the ESM
36
+ [facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ vocab_size (`int`, *optional*):
44
+ Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
45
+ `input_ids` passed when calling [`ESMModel`].
46
+ mask_token_id (`int`, *optional*):
47
+ The index of the mask token in the vocabulary. This must be included in the config because of the
48
+ "mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
49
+ pad_token_id (`int`, *optional*):
50
+ The index of the padding token in the vocabulary. This must be included in the config because certain parts
51
+ of the ESM code use this instead of the attention mask.
52
+ hidden_size (`int`, *optional*, defaults to 768):
53
+ Dimensionality of the encoder layers and the pooler layer.
54
+ num_hidden_layers (`int`, *optional*, defaults to 12):
55
+ Number of hidden layers in the Transformer encoder.
56
+ num_attention_heads (`int`, *optional*, defaults to 12):
57
+ Number of attention heads for each attention layer in the Transformer encoder.
58
+ intermediate_size (`int`, *optional*, defaults to 3072):
59
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
60
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
62
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
63
+ The dropout ratio for the attention probabilities.
64
+ max_position_embeddings (`int`, *optional*, defaults to 1026):
65
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
66
+ just in case (e.g., 512 or 1024 or 2048).
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
72
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`, `"rotary"`.
73
+ For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
74
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
75
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
76
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
77
+ is_decoder (`bool`, *optional*, defaults to `False`):
78
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
79
+ use_cache (`bool`, *optional*, defaults to `True`):
80
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
81
+ relevant if `config.is_decoder=True`.
82
+ emb_layer_norm_before (`bool`, *optional*):
83
+ Whether to apply layer normalization after embeddings but before the main stem of the network.
84
+ token_dropout (`bool`, defaults to `False`):
85
+ When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
86
+
87
+ Examples:
88
+
89
+ ```python
90
+ >>> from transformers import EsmModel, EsmConfig
91
+
92
+ >>> # Initializing an ESM facebook/esm-1b style configuration
+ >>> configuration = EsmConfig()
93
+
94
+ >>> # Initializing a model from the configuration
+ >>> model = EsmModel(configuration)
95
+
96
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
97
+ ```"""
98
+
99
+ model_type = "esm"
100
+
101
+ def __init__(
102
+ self,
103
+ vocab_size=None,
104
+ mask_token_id=None,
105
+ pad_token_id=None,
106
+ hidden_size=768,
107
+ num_hidden_layers=12,
108
+ num_attention_heads=12,
109
+ intermediate_size=3072,
110
+ hidden_dropout_prob=0.1,
111
+ attention_probs_dropout_prob=0.1,
112
+ max_position_embeddings=1026,
113
+ initializer_range=0.02,
114
+ layer_norm_eps=1e-12,
115
+ position_embedding_type="absolute",
116
+ use_cache=True,
117
+ emb_layer_norm_before=None,
118
+ token_dropout=False,
119
+ is_folding_model=False,
120
+ esmfold_config=None,
121
+ vocab_list=None,
122
+ **kwargs,
123
+ ):
124
+ super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
125
+
126
+ self.vocab_size = vocab_size
127
+ self.hidden_size = hidden_size
128
+ self.num_hidden_layers = num_hidden_layers
129
+ self.num_attention_heads = num_attention_heads
130
+ self.intermediate_size = intermediate_size
131
+ self.hidden_dropout_prob = hidden_dropout_prob
132
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
133
+ self.max_position_embeddings = max_position_embeddings
134
+ self.initializer_range = initializer_range
135
+ self.layer_norm_eps = layer_norm_eps
136
+ self.position_embedding_type = position_embedding_type
137
+ self.use_cache = use_cache
138
+ self.emb_layer_norm_before = emb_layer_norm_before
139
+ self.token_dropout = token_dropout
140
+ self.is_folding_model = is_folding_model
141
+ if is_folding_model:
142
+ if esmfold_config is None:
143
+ logger.info("No esmfold_config supplied for folding model, using default values.")
144
+ esmfold_config = EsmFoldConfig()
145
+ elif isinstance(esmfold_config, dict):
146
+ esmfold_config = EsmFoldConfig(**esmfold_config)
147
+ self.esmfold_config = esmfold_config
148
+ if vocab_list is None:
149
+ logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
150
+ self.vocab_list = get_default_vocab_list()
151
+ else:
152
+ self.vocab_list = vocab_list
153
+ else:
154
+ self.esmfold_config = None
155
+ self.vocab_list = None
156
+ if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
157
+ raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
158
+
159
+ def to_dict(self):
160
+ """
161
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
162
+
163
+ Returns:
164
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
165
+ """
166
+ output = super().to_dict()
167
+ if isinstance(self.esmfold_config, EsmFoldConfig):
168
+ output["esmfold_config"] = self.esmfold_config.to_dict()
169
+ return output
170
+
171
+
172
+ @dataclass
173
+ class EsmFoldConfig:
174
+ esm_type: str = None
175
+ fp16_esm: bool = True
176
+ use_esm_attn_map: bool = False
177
+ esm_ablate_pairwise: bool = False
178
+ esm_ablate_sequence: bool = False
179
+ esm_input_dropout: float = 0
180
+
181
+ embed_aa: bool = True
182
+ bypass_lm: bool = False
183
+
184
+ lddt_head_hid_dim: int = 128
185
+ trunk: "TrunkConfig" = None
186
+
187
+ def __post_init__(self):
188
+ if self.trunk is None:
189
+ self.trunk = TrunkConfig()
190
+ elif isinstance(self.trunk, dict):
191
+ self.trunk = TrunkConfig(**self.trunk)
192
+
193
+ def to_dict(self):
194
+ """
195
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
196
+
197
+ Returns:
198
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
199
+ """
200
+ output = asdict(self)
201
+ output["trunk"] = self.trunk.to_dict()
202
+ return output
203
+
204
+
205
+ @dataclass
206
+ class TrunkConfig:
207
+ num_blocks: int = 48
208
+ sequence_state_dim: int = 1024
209
+ pairwise_state_dim: int = 128
210
+ sequence_head_width: int = 32
211
+ pairwise_head_width: int = 32
212
+ position_bins: int = 32
213
+ dropout: float = 0
214
+ layer_drop: float = 0
215
+ cpu_grad_checkpoint: bool = False
216
+ max_recycles: int = 4
217
+ chunk_size: Optional[int] = 128
218
+ structure_module: "StructureModuleConfig" = None
219
+
220
+ def __post_init__(self):
221
+ if self.structure_module is None:
222
+ self.structure_module = StructureModuleConfig()
223
+ elif isinstance(self.structure_module, dict):
224
+ self.structure_module = StructureModuleConfig(**self.structure_module)
225
+
226
+ if self.max_recycles <= 0:
227
+ raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
228
+ if self.sequence_state_dim % self.sequence_head_width != 0:
229
+ raise ValueError(
230
+ "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
231
+ f" {self.sequence_state_dim} and {self.sequence_head_width}."
232
+ )
233
+ if self.pairwise_state_dim % self.pairwise_head_width != 0:
234
+ raise ValueError(
235
+ "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
236
+ f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
237
+ )
238
+
239
+ sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
240
+ pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
241
+
242
+ if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
243
+ raise ValueError(
244
+ "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
245
+ f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
246
+ )
247
+ if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
248
+ raise ValueError(
249
+ "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
250
+ f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
251
+ )
252
+ if self.pairwise_state_dim % 2 != 0:
253
+ raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
254
+
255
+ if self.dropout >= 0.4:
256
+ raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
257
+
258
+ def to_dict(self):
259
+ """
260
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
261
+
262
+ Returns:
263
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
264
+ """
265
+ output = asdict(self)
266
+ output["structure_module"] = self.structure_module.to_dict()
267
+ return output
268
+
269
+
270
+ @dataclass
271
+ class StructureModuleConfig:
272
+ """
273
+ Args:
274
+ sequence_dim:
275
+ Single representation channel dimension
276
+ pairwise_dim:
277
+ Pair representation channel dimension
278
+ ipa_dim:
279
+ IPA hidden channel dimension
280
+ resnet_dim:
281
+ Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
282
+ num_heads_ipa:
283
+ Number of IPA heads
284
+ num_qk_points:
285
+ Number of query/key points to generate during IPA
286
+ num_v_points:
287
+ Number of value points to generate during IPA
288
+ dropout_rate:
289
+ Dropout rate used throughout the layer
290
+ num_blocks:
291
+ Number of structure module blocks
292
+ num_transition_layers:
293
+ Number of layers in the single representation transition (Alg. 23 lines 8-9)
294
+ num_resnet_blocks:
295
+ Number of blocks in the angle resnet
296
+ num_angles:
297
+ Number of angles to generate in the angle resnet
298
+ trans_scale_factor:
299
+ Scale of single representation transition hidden dimension
300
+ epsilon:
301
+ Small number used in angle resnet normalization
302
+ inf:
303
+ Large number used for attention masking
304
+ """
305
+
306
+ sequence_dim: int = 384
307
+ pairwise_dim: int = 128
308
+ ipa_dim: int = 16
309
+ resnet_dim: int = 128
310
+ num_heads_ipa: int = 12
311
+ num_qk_points: int = 4
312
+ num_v_points: int = 8
313
+ dropout_rate: float = 0.1
314
+ num_blocks: int = 8
315
+ num_transition_layers: int = 1
316
+ num_resnet_blocks: int = 2
317
+ num_angles: int = 7
318
+ trans_scale_factor: int = 10
319
+ epsilon: float = 1e-8
320
+ inf: float = 1e5
321
+
322
+ def to_dict(self):
323
+ return asdict(self)
324
+
325
+
326
+ def get_default_vocab_list():
327
+ return (
328
+ "<cls>",
329
+ "<pad>",
330
+ "<eos>",
331
+ "<unk>",
332
+ "L",
333
+ "A",
334
+ "G",
335
+ "V",
336
+ "S",
337
+ "E",
338
+ "R",
339
+ "T",
340
+ "I",
341
+ "D",
342
+ "P",
343
+ "K",
344
+ "Q",
345
+ "N",
346
+ "F",
347
+ "Y",
348
+ "M",
349
+ "H",
350
+ "W",
351
+ "C",
352
+ "X",
353
+ "B",
354
+ "U",
355
+ "Z",
356
+ "O",
357
+ ".",
358
+ "-",
359
+ "<null_1>",
360
+ "<mask>",
361
+ )
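
As a small illustration of the nested config dataclasses defined above (toy values, not an official recipe): plain dicts passed for `esmfold_config` and `trunk` are promoted to dataclasses in `__post_init__`, and `to_dict()` serializes them back to nested dicts.

```python
from transformers.models.esm.configuration_esm import EsmConfig

# Hypothetical tiny folding-model config, just to show the dict-to-dataclass round trip.
config = EsmConfig(
    vocab_size=33,
    hidden_size=320,
    is_folding_model=True,
    esmfold_config={"fp16_esm": False, "trunk": {"num_blocks": 4}},
)
d = config.to_dict()
print(type(config.esmfold_config).__name__)                            # EsmFoldConfig
print(d["esmfold_config"]["trunk"]["num_blocks"])                      # 4
print(d["esmfold_config"]["trunk"]["structure_module"]["num_angles"])  # 7 (default)
```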
venv/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py ADDED
@@ -0,0 +1,400 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ESM checkpoint."""
16
+
17
+
18
+ import argparse
19
+ import pathlib
20
+ from pathlib import Path
21
+ from tempfile import TemporaryDirectory
22
+
23
+ import esm as esm_module
24
+ import torch
25
+ from esm.esmfold.v1.misc import batch_encode_sequences as esmfold_encode_sequences
26
+ from esm.esmfold.v1.pretrained import esmfold_v1
27
+
28
+ from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig
29
+ from transformers.models.esm.modeling_esm import (
30
+ EsmForMaskedLM,
31
+ EsmForSequenceClassification,
32
+ EsmIntermediate,
33
+ EsmLayer,
34
+ EsmOutput,
35
+ EsmSelfAttention,
36
+ EsmSelfOutput,
37
+ )
38
+ from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
39
+ from transformers.models.esm.tokenization_esm import EsmTokenizer
40
+ from transformers.utils import logging
41
+
42
+
43
+ logging.set_verbosity_info()
44
+ logger = logging.get_logger(__name__)
45
+
46
+ SAMPLE_DATA = [
47
+ (
48
+ "protein1",
49
+ "MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEPWQFSMLAAYMFLLIVLGFPINFLTLYVTVQHKKLRTPLNYILLNLAVADLFMVLGGFTSTLYTSLHGYFVFGPTGCNLEGFFATLGGEIALWSLVVLAIERYVVVCKPMSNFRFGENHAIMGVAFTWVMALACAAPPLAGWSRYIPEGLQCSCGIDYYTLKPEVNNESFVIYMFVVHFTIPMIIIFFCYGQLVFTVKEAAAQQQESATTQKAEKEVTRMVIIMVIAFLICWVPYASVAFYIFTHQGSNFGPIFMTIPAFFAKSAAIYNPVIYIMMNKQFRNCMLTTICCGKNPLGDDEASATVSKTETSQVAPA",
50
+ ),
51
+ ("protein2", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA"),
52
+ ("protein3", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLAGG"),
53
+ ("protein4", "MKTVRQERLKSI<mask>RILERSKEPVSGAQLAEELS<mask>SRQVIVQDIAYLRSLGYN<mask>VATPRGYVLA"),
54
+ ]
55
+
56
+ MODEL_MAPPING = {
57
+ "esm1b_t33_650M_UR50S": esm_module.pretrained.esm1b_t33_650M_UR50S,
58
+ "esm1v_t33_650M_UR90S_1": esm_module.pretrained.esm1v_t33_650M_UR90S_1,
59
+ "esm1v_t33_650M_UR90S_2": esm_module.pretrained.esm1v_t33_650M_UR90S_2,
60
+ "esm1v_t33_650M_UR90S_3": esm_module.pretrained.esm1v_t33_650M_UR90S_3,
61
+ "esm1v_t33_650M_UR90S_4": esm_module.pretrained.esm1v_t33_650M_UR90S_4,
62
+ "esm1v_t33_650M_UR90S_5": esm_module.pretrained.esm1v_t33_650M_UR90S_5,
63
+ "esm2_t48_15B_UR50D": esm_module.pretrained.esm2_t48_15B_UR50D,
64
+ "esm2_t36_3B_UR50D": esm_module.pretrained.esm2_t36_3B_UR50D,
65
+ "esm2_t33_650M_UR50D": esm_module.pretrained.esm2_t33_650M_UR50D,
66
+ "esm2_t30_150M_UR50D": esm_module.pretrained.esm2_t30_150M_UR50D,
67
+ "esm2_t12_35M_UR50D": esm_module.pretrained.esm2_t12_35M_UR50D,
68
+ "esm2_t6_8M_UR50D": esm_module.pretrained.esm2_t6_8M_UR50D,
69
+ "esmfold_v1": esmfold_v1,
70
+ }
71
+
72
+ restypes = list("ARNDCQEGHILKMFPSTWYV")
73
+
74
+ restypes_with_x = restypes + ["X"]
75
+ restypes_with_extras = restypes_with_x + ["<pad>", "<mask>", "<cls>", "<sep>", "<eos>"]
76
+
77
+
78
+ def get_esmfold_tokenizer():
79
+ with TemporaryDirectory() as tempdir:
80
+ vocab = "\n".join(restypes_with_extras)
81
+ vocab_file = Path(tempdir) / "vocab.txt"
82
+ vocab_file.write_text(vocab)
83
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
84
+ hf_tokenizer.pad_token_id = 0 # Overlaps with 'A' but that seems to be what they want
85
+ return hf_tokenizer
86
+
87
+
88
+ def transfer_and_check_weights(original_module, our_module):
89
+ status = our_module.load_state_dict(original_module.state_dict())
90
+ if status.missing_keys:
91
+ raise ValueError(f"Missing keys: {status.missing_keys}")
92
+ if status.unexpected_keys:
93
+ raise ValueError(f"Unexpected keys: {status.unexpected_keys}")
94
+
95
+
96
+ def convert_esm_checkpoint_to_pytorch(
97
+ model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str
98
+ ):
99
+ """
100
+ Copy/paste/tweak esm's weights to our BERT structure.
101
+ """
102
+ if model.startswith("esmfold"):
103
+ esm = MODEL_MAPPING[model]()
104
+ else:
105
+ esm, alphabet = MODEL_MAPPING[model]()
106
+ esm.eval() # disable dropout
107
+
108
+ if model.startswith("esmfold"):
109
+ embed_dim = esm.esm.embed_dim
110
+ num_layers = esm.esm.num_layers
111
+ num_attention_heads = esm.esm.attention_heads
112
+ intermediate_size = 4 * embed_dim
113
+ token_dropout = esm.esm.token_dropout
114
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
115
+ position_embedding_type = "rotary"
116
+ is_folding_model = True
117
+ esmfold_config = EsmFoldConfig()
118
+ for key, val in esm.cfg.items():
119
+ if hasattr(esmfold_config, key) and key != "trunk":
120
+ setattr(esmfold_config, key, val)
121
+ for key, val in esm.cfg.trunk.items():
122
+ if hasattr(esmfold_config.trunk, key) and key != "structure_module":
123
+ setattr(esmfold_config.trunk, key, val)
124
+ for key, val in esm.cfg.trunk.structure_module.items():
125
+ if hasattr(esmfold_config.trunk.structure_module, key):
126
+ setattr(esmfold_config.trunk.structure_module, key, val)
127
+ elif hasattr(esm, "args"):
128
+ # Indicates an ESM-1b or ESM-1v model
129
+ embed_dim = esm.args.embed_dim
130
+ num_layers = esm.args.layers
131
+ num_attention_heads = esm.args.attention_heads
132
+ intermediate_size = esm.args.ffn_embed_dim
133
+ token_dropout = esm.args.token_dropout
134
+ emb_layer_norm_before = True if esm.emb_layer_norm_before else False
135
+ position_embedding_type = "absolute"
136
+ is_folding_model = False
137
+ esmfold_config = None
138
+ else:
139
+ # Indicates an ESM-2 model
140
+ embed_dim = esm.embed_dim
141
+ num_layers = esm.num_layers
142
+ num_attention_heads = esm.attention_heads
143
+ intermediate_size = 4 * embed_dim # This is hardcoded in ESM-2
144
+ token_dropout = esm.token_dropout
145
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
146
+ position_embedding_type = "rotary"
147
+ is_folding_model = False
148
+ esmfold_config = None
149
+
150
+ if is_folding_model:
151
+ alphabet = esm.esm.alphabet
152
+ vocab_list = tuple(alphabet.all_toks)
153
+ mask_token_id = alphabet.mask_idx
154
+ pad_token_id = alphabet.padding_idx
155
+
156
+ if is_folding_model:
157
+ original_esm_model = esm.esm
158
+ else:
159
+ original_esm_model = esm
160
+
161
+ config = EsmConfig(
162
+ vocab_size=original_esm_model.embed_tokens.num_embeddings,
163
+ mask_token_id=mask_token_id,
164
+ hidden_size=embed_dim,
165
+ num_hidden_layers=num_layers,
166
+ num_attention_heads=num_attention_heads,
167
+ intermediate_size=intermediate_size,
168
+ max_position_embeddings=1026,
169
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
170
+ attention_probs_dropout_prob=0.0,
171
+ hidden_dropout_prob=0.0,
172
+ pad_token_id=pad_token_id,
173
+ emb_layer_norm_before=emb_layer_norm_before,
174
+ token_dropout=token_dropout,
175
+ position_embedding_type=position_embedding_type,
176
+ is_folding_model=is_folding_model,
177
+ esmfold_config=esmfold_config,
178
+ vocab_list=vocab_list,
179
+ )
180
+ if classification_head:
181
+ config.num_labels = esm.classification_heads["mnli"].out_proj.weight.shape[0]
182
+ print("Our ESM config:", config)
183
+
184
+ if model.startswith("esmfold"):
185
+ model_class = EsmForProteinFolding
186
+ elif classification_head:
187
+ model_class = EsmForSequenceClassification
188
+ else:
189
+ model_class = EsmForMaskedLM
190
+ model = model_class(config)
191
+ model.eval()
192
+
193
+ # Now let's copy all the weights.
194
+ # Embeddings
195
+ model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight
196
+ if position_embedding_type == "absolute":
197
+ model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight
198
+
199
+ if config.emb_layer_norm_before:
200
+ model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight
201
+ model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias
202
+
203
+ model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight
204
+ model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias
205
+
206
+ for i in range(config.num_hidden_layers):
207
+ # Encoder: start of layer
208
+ layer: EsmLayer = model.esm.encoder.layer[i]
209
+ # esm_layer: TransformerSentenceEncoderLayer = original_esm_model.layers[i]
210
+ esm_layer = original_esm_model.layers[i]
211
+
212
+ # self attention
213
+ self_attn: EsmSelfAttention = layer.attention.self
214
+ assert (
215
+ esm_layer.self_attn.k_proj.weight.data.shape
216
+ == esm_layer.self_attn.q_proj.weight.data.shape
217
+ == esm_layer.self_attn.v_proj.weight.data.shape
218
+ == torch.Size((config.hidden_size, config.hidden_size))
219
+ )
220
+
221
+ self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight
222
+ self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias
223
+ self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight
224
+ self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias
225
+ self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight
226
+ self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias
227
+
228
+ if getattr(esm_layer.self_attn, "rot_emb", None) is not None:
229
+ # Matt: Although inv_freq is not a trainable weight, it is computed at model init and cached.
230
+ # During the training of ESM-2 the model was converted to float16 precision, which also converts
231
+ # the inv_freq tensor, and the loss of precision remains even if the model is loaded later as float32.
232
+ # If we recompute inv_freq without this loss of precision then we will get subtly different rotary
233
+ # embeddings, which are enough to cause significant discrepancies in model outputs. To avoid this,
234
+ # we make sure the new model copies the data from the old inv_freq.
235
+ self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq
236
+
237
+ # LayerNorm changes for pre-activation
238
+ layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight
239
+ layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias
240
+ layer.LayerNorm.weight = esm_layer.final_layer_norm.weight
241
+ layer.LayerNorm.bias = esm_layer.final_layer_norm.bias
242
+
243
+ # self-attention output
244
+ self_output: EsmSelfOutput = layer.attention.output
245
+ assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape
246
+ self_output.dense.weight = esm_layer.self_attn.out_proj.weight
247
+ self_output.dense.bias = esm_layer.self_attn.out_proj.bias
248
+
249
+ # intermediate
250
+ intermediate: EsmIntermediate = layer.intermediate
251
+ assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape
252
+ intermediate.dense.weight = esm_layer.fc1.weight
253
+ intermediate.dense.bias = esm_layer.fc1.bias
254
+
255
+ # output
256
+ bert_output: EsmOutput = layer.output
257
+ assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape
258
+ bert_output.dense.weight = esm_layer.fc2.weight
259
+ bert_output.dense.bias = esm_layer.fc2.bias
260
+ # end of layer
261
+
262
+ if is_folding_model:
263
+ model.esm_s_combine.data = esm.esm_s_combine.data
264
+ model.af2_to_esm.data = esm.af2_to_esm.data
265
+ transfer_and_check_weights(esm.embedding, model.embedding)
266
+ transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp)
267
+ transfer_and_check_weights(esm.trunk, model.trunk)
268
+ transfer_and_check_weights(esm.distogram_head, model.distogram_head)
269
+ transfer_and_check_weights(esm.ptm_head, model.ptm_head)
270
+ transfer_and_check_weights(esm.lm_head, model.lm_head)
271
+ transfer_and_check_weights(esm.lddt_head, model.lddt_head)
272
+
273
+ elif classification_head:
274
+ model.classifier.dense.weight = esm.esm.classification_heads["mnli"].dense.weight
275
+ model.classifier.dense.bias = esm.classification_heads["mnli"].dense.bias
276
+ model.classifier.out_proj.weight = esm.classification_heads["mnli"].out_proj.weight
277
+ model.classifier.out_proj.bias = esm.classification_heads["mnli"].out_proj.bias
278
+ else:
279
+ # LM Head
280
+ model.lm_head.dense.weight = esm.lm_head.dense.weight
281
+ model.lm_head.dense.bias = esm.lm_head.dense.bias
282
+ model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight
283
+ model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias
284
+ model.lm_head.decoder.weight = esm.lm_head.weight
285
+ model.lm_head.bias = esm.lm_head.bias
286
+
287
+ # Contact prediction head
288
+ transfer_and_check_weights(esm.contact_head, model.esm.contact_head)
289
+
290
+ # Prepare data (first 2 sequences from ESMStructuralSplitDataset superfamily / 4)
291
+ if is_folding_model:
292
+ # Folding models aren't trained on masked inputs and don't like mask tokens.
293
+ sample_data = SAMPLE_DATA[:2]
294
+ else:
295
+ sample_data = SAMPLE_DATA
296
+
297
+ if is_folding_model:
298
+ hf_tokenizer = get_esmfold_tokenizer()
299
+ hf_tokens = hf_tokenizer(
300
+ [row[1] for row in sample_data], return_tensors="pt", padding=True, add_special_tokens=False
301
+ )
302
+ esmfold_aas, esmfold_mask, _, _, _ = esmfold_encode_sequences([row[1] for row in sample_data])
303
+ success = torch.all(hf_tokens["input_ids"] == esmfold_aas) and torch.all(
304
+ hf_tokens["attention_mask"] == esmfold_mask
305
+ )
306
+ else:
307
+ # Let's check that we get the same results.
308
+ batch_converter = alphabet.get_batch_converter()
309
+ batch_labels, batch_strs, batch_tokens = batch_converter(sample_data)
310
+ # Prepare tokenizer and make sure it matches
311
+ with TemporaryDirectory() as tempdir:
312
+ vocab = "\n".join(alphabet.all_toks)
313
+ vocab_file = Path(tempdir) / "vocab.txt"
314
+ vocab_file.write_text(vocab)
315
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
316
+
317
+ hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors="pt", padding=True)
318
+ success = torch.all(hf_tokens["input_ids"] == batch_tokens)
319
+
320
+ print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩")
321
+ if not success:
322
+ raise Exception("Tokenization does not match!")
323
+
324
+ with torch.no_grad():
325
+ if is_folding_model:
326
+ # Let's test the model in parts
327
+ # ESMFold always converts the ESM stem to float16, which requires float16 ops
328
+ # that don't exist on CPU. Therefore, to test it we need to run it on GPU. However,
329
+ # ESMFold is what we in the community call a "big boy" and so we desperately avoid putting both the
330
+ # original and the converted model on the GPU at the same time.
331
+ their_output = esm.cuda().infer([row[1] for row in sample_data])
332
+ our_output = model.cuda()(
333
+ input_ids=hf_tokens["input_ids"].cuda(), attention_mask=hf_tokens["attention_mask"].cuda()
334
+ )
335
+ else:
336
+ our_output = model(**hf_tokens, output_hidden_states=True)
337
+ our_output = our_output["logits"]
338
+ if classification_head:
339
+ their_output = esm.model.classification_heads["mnli"](esm.extract_features(batch_tokens))
340
+ else:
341
+ their_output = esm(hf_tokens["input_ids"], repr_layers=list(range(999)))
342
+ their_output = their_output["logits"]
343
+
344
+ if is_folding_model:
345
+ max_absolute_diff = torch.max(torch.abs(our_output["positions"] - their_output["positions"])).item()
346
+ success = torch.allclose(our_output["positions"], their_output["positions"], atol=1e-5)
347
+ else:
348
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
349
+ success = torch.allclose(our_output, their_output, atol=1e-5)
350
+
351
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
352
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
353
+
354
+ if not success:
355
+ raise Exception("Something went wRoNg")
356
+
357
+ if not is_folding_model:
358
+ # Let's check contact prediction too
359
+ our_output = model.predict_contacts(hf_tokens["input_ids"], hf_tokens["attention_mask"])
360
+ their_output = esm.predict_contacts(hf_tokens["input_ids"])
361
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
362
+ success = torch.allclose(our_output, their_output, atol=1e-5)
363
+
364
+ print("Contact prediction testing:")
365
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
366
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
367
+
368
+ if not success:
369
+ raise Exception("Something went wRoNg")
370
+
371
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
372
+ print(f"Saving model to {pytorch_dump_folder_path}")
373
+ model.save_pretrained(pytorch_dump_folder_path)
374
+
375
+ del esm # Free up some memory before continuing
376
+
377
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
378
+ hf_tokenizer.save_pretrained(pytorch_dump_folder_path)
379
+
380
+ if push_to_repo:
381
+ model.push_to_hub(repo_id=push_to_repo, token_token=auth_token)
382
+ hf_tokenizer.push_to_hub(repo_id=push_to_repo, token_token=auth_token)
383
+
384
+
385
+ if __name__ == "__main__":
386
+ parser = argparse.ArgumentParser()
387
+ # Required parameters
388
+ parser.add_argument(
389
+ "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model."
390
+ )
391
+ parser.add_argument(
392
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
393
+ )
394
+ parser.add_argument("--model", default=None, type=str, required=True, help="Name of model to convert.")
395
+ parser.add_argument("--push_to_repo", type=str, help="Repo to upload to (including username!).")
396
+ parser.add_argument("--auth_token", type=str, help="HuggingFace auth token.")
397
+ args = parser.parse_args()
398
+ convert_esm_checkpoint_to_pytorch(
399
+ args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token
400
+ )
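
For reference, the script above can also be driven from Python instead of the CLI; a hypothetical invocation (placeholder output path, and the original `esm` package must be installed alongside transformers) would look like:

```python
from transformers.models.esm.convert_esm import convert_esm_checkpoint_to_pytorch

convert_esm_checkpoint_to_pytorch(
    model="esm2_t6_8M_UR50D",            # any key from MODEL_MAPPING
    pytorch_dump_folder_path="./esm2_t6_8M_hf",
    classification_head=False,
    push_to_repo=None,                   # skip uploading to the Hub
    auth_token=None,
)
```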
venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py ADDED
@@ -0,0 +1,1265 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ESM model."""
16
+
17
+ import math
18
+ from typing import List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ BaseModelOutputWithPoolingAndCrossAttentions,
29
+ MaskedLMOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import logging
35
+ from .configuration_esm import EsmConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
41
+ _CONFIG_FOR_DOC = "EsmConfig"
42
+
43
+
44
+ from ..deprecated._archive_maps import ESM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
45
+
46
+
47
+ def rotate_half(x):
48
+ x1, x2 = x.chunk(2, dim=-1)
49
+ return torch.cat((-x2, x1), dim=-1)
50
+
51
+
52
+ def apply_rotary_pos_emb(x, cos, sin):
53
+ cos = cos[:, :, : x.shape[-2], :]
54
+ sin = sin[:, :, : x.shape[-2], :]
55
+
56
+ return (x * cos) + (rotate_half(x) * sin)
57
+
58
+
59
+ def gelu(x):
60
+ """
61
+ This is the gelu implementation from the original ESM repo. Using F.gelu yields subtly wrong results.
62
+ """
63
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
64
+
65
+
66
+ def symmetrize(x):
67
+ "Make layer symmetric in final two dimensions, used for contact prediction."
68
+ return x + x.transpose(-1, -2)
69
+
70
+
71
+ def average_product_correct(x):
72
+ "Perform average product correct, used for contact prediction."
73
+ a1 = x.sum(-1, keepdims=True)
74
+ a2 = x.sum(-2, keepdims=True)
75
+ a12 = x.sum((-1, -2), keepdims=True)
76
+
77
+ avg = a1 * a2
78
+ avg.div_(a12) # in-place to reduce memory
79
+ normalized = x - avg
80
+ return normalized
81
+
82
+
83
+ class RotaryEmbedding(torch.nn.Module):
84
+ """
85
+ Rotary position embeddings based on those in
86
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
87
+ matrices which depend on their relative positions.
88
+ """
89
+
90
+ def __init__(self, dim: int):
91
+ super().__init__()
92
+ # Generate and save the inverse frequency buffer (non trainable)
93
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
94
+ inv_freq = inv_freq
95
+ self.register_buffer("inv_freq", inv_freq)
96
+
97
+ self._seq_len_cached = None
98
+ self._cos_cached = None
99
+ self._sin_cached = None
100
+
101
+ def _update_cos_sin_tables(self, x, seq_dimension=2):
102
+ seq_len = x.shape[seq_dimension]
103
+
104
+ # Reset the tables if the sequence length has changed,
105
+ # or if we're on a new device (possibly due to tracing for instance)
106
+ if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
107
+ self._seq_len_cached = seq_len
108
+ t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq)
109
+ freqs = torch.outer(t, self.inv_freq)
110
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
111
+
112
+ self._cos_cached = emb.cos()[None, None, :, :]
113
+ self._sin_cached = emb.sin()[None, None, :, :]
114
+
115
+ return self._cos_cached, self._sin_cached
116
+
117
+ def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
118
+ self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)
119
+
120
+ return (
121
+ apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
122
+ apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
123
+ )
124
+
125
+
126
+ class EsmContactPredictionHead(nn.Module):
127
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
128
+
129
+ def __init__(
130
+ self,
131
+ in_features: int,
132
+ bias=True,
133
+ eos_idx: int = 2,
134
+ ):
135
+ super().__init__()
136
+ self.in_features = in_features
137
+ self.eos_idx = eos_idx
138
+ self.regression = nn.Linear(in_features, 1, bias)
139
+ self.activation = nn.Sigmoid()
140
+
141
+ def forward(self, tokens, attentions):
142
+ # remove eos token attentions
143
+ eos_mask = tokens.ne(self.eos_idx).to(attentions)
144
+ eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
145
+ attentions = attentions * eos_mask[:, None, None, :, :]
146
+ attentions = attentions[..., :-1, :-1]
147
+ # remove cls token attentions
148
+ attentions = attentions[..., 1:, 1:]
149
+ batch_size, layers, heads, seqlen, _ = attentions.size()
150
+ attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
151
+
152
+ # features: batch x channels x tokens x tokens (symmetric)
153
+ attentions = attentions.to(
154
+ self.regression.weight.device
155
+ ) # attentions always float32, may need to convert to float16
156
+ attentions = average_product_correct(symmetrize(attentions))
157
+ attentions = attentions.permute(0, 2, 3, 1)
158
+ return self.activation(self.regression(attentions).squeeze(3))
159
+
160
+
161
+ class EsmEmbeddings(nn.Module):
162
+ """
163
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
164
+ """
165
+
166
+ def __init__(self, config):
167
+ super().__init__()
168
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
169
+
170
+ if config.emb_layer_norm_before:
171
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
172
+ else:
173
+ self.layer_norm = None
174
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
175
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
176
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
177
+ self.register_buffer(
178
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
179
+ )
180
+
181
+ self.padding_idx = config.pad_token_id
182
+ self.position_embeddings = nn.Embedding(
183
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
184
+ )
185
+ self.token_dropout = config.token_dropout
186
+ self.mask_token_id = config.mask_token_id
187
+
188
+ def forward(
189
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
190
+ ):
191
+ if position_ids is None:
192
+ if input_ids is not None:
193
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
194
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
195
+ else:
196
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
197
+
198
+ if inputs_embeds is None:
199
+ inputs_embeds = self.word_embeddings(input_ids)
200
+
201
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
202
+ # embedding_scale factor here.
203
+ embeddings = inputs_embeds
204
+
205
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
206
+ # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
207
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
208
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
209
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
210
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
211
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
212
+ if self.token_dropout:
213
+ embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0)
214
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
215
+ src_lengths = attention_mask.sum(-1)
216
+ mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths
217
+ embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(
218
+ embeddings.dtype
219
+ )
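# Editorial worked example (illustrative): if 15% of the tokens in a sample are masked, the
# scale factor is (1 - 0.12) / (1 - 0.15) = 0.88 / 0.85 ≈ 1.035, i.e. the surviving embeddings
# are scaled up slightly, mirroring inverted dropout. With no masked tokens the factor is 0.88,
# reproducing the average attenuation the model saw in training, where ~12% of embeddings were zeroed.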
220
+
221
+ if self.position_embedding_type == "absolute":
222
+ position_embeddings = self.position_embeddings(position_ids)
223
+ embeddings = embeddings + position_embeddings
224
+
225
+ if self.layer_norm is not None:
226
+ embeddings = self.layer_norm(embeddings)
227
+ if attention_mask is not None:
228
+ embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype)
229
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
230
+ # embeddings = self.dropout(embeddings)
231
+ return embeddings
232
+
233
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
234
+ """
235
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
236
+
237
+ Args:
238
+ inputs_embeds: torch.Tensor
239
+
240
+ Returns: torch.Tensor
241
+ """
242
+ input_shape = inputs_embeds.size()[:-1]
243
+ sequence_length = input_shape[1]
244
+
245
+ position_ids = torch.arange(
246
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
247
+ )
248
+ return position_ids.unsqueeze(0).expand(input_shape)
249
+
250
+
251
+ class EsmSelfAttention(nn.Module):
252
+ def __init__(self, config, position_embedding_type=None):
253
+ super().__init__()
254
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
255
+ raise ValueError(
256
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
257
+ f"heads ({config.num_attention_heads})"
258
+ )
259
+
260
+ self.num_attention_heads = config.num_attention_heads
261
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
262
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
263
+
264
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
265
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
266
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
267
+
268
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
269
+ self.position_embedding_type = position_embedding_type or getattr(
270
+ config, "position_embedding_type", "absolute"
271
+ )
272
+ self.rotary_embeddings = None
273
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
274
+ self.max_position_embeddings = config.max_position_embeddings
275
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
276
+ elif self.position_embedding_type == "rotary":
277
+ self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size)
278
+
279
+ self.is_decoder = config.is_decoder
280
+
281
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
282
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
283
+ x = x.view(new_x_shape)
284
+ return x.permute(0, 2, 1, 3)
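# Shape note (editorial): transpose_for_scores maps (batch, seq_len, all_head_size)
# -> view (batch, seq_len, num_heads, head_size) -> permute (batch, num_heads, seq_len, head_size),
# e.g. (2, 16, 512) with 8 heads becomes (2, 8, 16, 64).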
285
+
286
+ def forward(
287
+ self,
288
+ hidden_states: torch.Tensor,
289
+ attention_mask: Optional[torch.FloatTensor] = None,
290
+ head_mask: Optional[torch.FloatTensor] = None,
291
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
292
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
293
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
294
+ output_attentions: Optional[bool] = False,
295
+ ) -> Tuple[torch.Tensor]:
296
+ mixed_query_layer = self.query(hidden_states)
297
+
298
+ # If this is instantiated as a cross-attention module, the keys
299
+ # and values come from an encoder; the attention mask needs to be
300
+ # such that the encoder's padding tokens are not attended to.
301
+ is_cross_attention = encoder_hidden_states is not None
302
+
303
+ if is_cross_attention and past_key_value is not None:
304
+ # reuse k,v, cross_attentions
305
+ key_layer = past_key_value[0]
306
+ value_layer = past_key_value[1]
307
+ attention_mask = encoder_attention_mask
308
+ elif is_cross_attention:
309
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
310
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
311
+ attention_mask = encoder_attention_mask
312
+ elif past_key_value is not None:
313
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
314
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
315
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
316
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
317
+ else:
318
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
319
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
320
+
321
+ query_layer = self.transpose_for_scores(mixed_query_layer)
322
+
323
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
324
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
325
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
326
+ # ESM code and fix rotary embeddings.
327
+ query_layer = query_layer * self.attention_head_size**-0.5
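# Editorial note: softmax((q / sqrt(d)) @ k.T) equals softmax((q @ k.T) / sqrt(d)), so this
# pre-scaling is mathematically equivalent to scaling the logits afterwards; the difference
# only matters because the rotary embedding below is applied to the already scaled query,
# which is what the original ESM implementation does.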
328
+
329
+ if self.is_decoder:
330
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
331
+ # Further calls to cross_attention layer can then reuse all cross-attention
332
+ # key/value_states (first "if" case)
333
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
334
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
335
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
336
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
337
+ past_key_value = (key_layer, value_layer)
338
+
339
+ if self.position_embedding_type == "rotary":
340
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
341
+
342
+ # Take the dot product between "query" and "key" to get the raw attention scores.
343
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
344
+
345
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
346
+ seq_length = hidden_states.size()[1]
347
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
348
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
349
+ distance = position_ids_l - position_ids_r
350
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
351
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
352
+
353
+ if self.position_embedding_type == "relative_key":
354
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
355
+ attention_scores = attention_scores + relative_position_scores
356
+ elif self.position_embedding_type == "relative_key_query":
357
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
358
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
359
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
360
+
361
+ if attention_mask is not None:
362
+ # Apply the attention mask (precomputed for all layers in the EsmModel forward() function)
363
+ attention_scores = attention_scores + attention_mask
364
+
365
+ # Normalize the attention scores to probabilities.
366
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
367
+
368
+ # This is actually dropping out entire tokens to attend to, which might
369
+ # seem a bit unusual, but is taken from the original Transformer paper.
370
+ attention_probs = self.dropout(attention_probs)
371
+
372
+ # Mask heads if we want to
373
+ if head_mask is not None:
374
+ attention_probs = attention_probs * head_mask
375
+
376
+ context_layer = torch.matmul(attention_probs.to(value_layer.dtype), value_layer)
377
+
378
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
379
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
380
+ context_layer = context_layer.view(new_context_layer_shape)
381
+
382
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
383
+
384
+ if self.is_decoder:
385
+ outputs = outputs + (past_key_value,)
386
+ return outputs
387
+
388
+
389
+ class EsmSelfOutput(nn.Module):
390
+ def __init__(self, config):
391
+ super().__init__()
392
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
393
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
394
+
395
+ def forward(self, hidden_states, input_tensor):
396
+ hidden_states = self.dense(hidden_states)
397
+ hidden_states = self.dropout(hidden_states)
398
+ hidden_states = hidden_states + input_tensor
399
+ return hidden_states
400
+
401
+
402
+ class EsmAttention(nn.Module):
403
+ def __init__(self, config):
404
+ super().__init__()
405
+ self.self = EsmSelfAttention(config)
406
+ self.output = EsmSelfOutput(config)
407
+ self.pruned_heads = set()
408
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
409
+
410
+ def prune_heads(self, heads):
411
+ if len(heads) == 0:
412
+ return
413
+ heads, index = find_pruneable_heads_and_indices(
414
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
415
+ )
416
+
417
+ # Prune linear layers
418
+ self.self.query = prune_linear_layer(self.self.query, index)
419
+ self.self.key = prune_linear_layer(self.self.key, index)
420
+ self.self.value = prune_linear_layer(self.self.value, index)
421
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
422
+
423
+ # Update hyper params and store pruned heads
424
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
425
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
426
+ self.pruned_heads = self.pruned_heads.union(heads)
427
+
428
+ def forward(
429
+ self,
430
+ hidden_states,
431
+ attention_mask=None,
432
+ head_mask=None,
433
+ encoder_hidden_states=None,
434
+ encoder_attention_mask=None,
435
+ past_key_value=None,
436
+ output_attentions=False,
437
+ ):
438
+ hidden_states_ln = self.LayerNorm(hidden_states)
439
+ self_outputs = self.self(
440
+ hidden_states_ln,
441
+ attention_mask,
442
+ head_mask,
443
+ encoder_hidden_states,
444
+ encoder_attention_mask,
445
+ past_key_value,
446
+ output_attentions,
447
+ )
448
+ attention_output = self.output(self_outputs[0], hidden_states)
449
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
450
+ return outputs
451
+
452
+
453
+ class EsmIntermediate(nn.Module):
454
+ def __init__(self, config):
455
+ super().__init__()
456
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
457
+
458
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
459
+ hidden_states = self.dense(hidden_states)
460
+ hidden_states = gelu(hidden_states)
461
+ return hidden_states
462
+
463
+
464
+ class EsmOutput(nn.Module):
465
+ def __init__(self, config):
466
+ super().__init__()
467
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
468
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
469
+
470
+ def forward(self, hidden_states, input_tensor):
471
+ hidden_states = self.dense(hidden_states)
472
+ hidden_states = self.dropout(hidden_states)
473
+ hidden_states = hidden_states + input_tensor
474
+ return hidden_states
475
+
476
+
477
+ class EsmLayer(nn.Module):
478
+ def __init__(self, config):
479
+ super().__init__()
480
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
481
+ self.seq_len_dim = 1
482
+ self.attention = EsmAttention(config)
483
+ self.is_decoder = config.is_decoder
484
+ self.add_cross_attention = config.add_cross_attention
485
+ if self.add_cross_attention:
486
+ if not self.is_decoder:
487
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
488
+ self.crossattention = EsmAttention(config)
489
+ self.intermediate = EsmIntermediate(config)
490
+ self.output = EsmOutput(config)
491
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
492
+
493
+ def forward(
494
+ self,
495
+ hidden_states,
496
+ attention_mask=None,
497
+ head_mask=None,
498
+ encoder_hidden_states=None,
499
+ encoder_attention_mask=None,
500
+ past_key_value=None,
501
+ output_attentions=False,
502
+ ):
503
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
504
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
505
+ self_attention_outputs = self.attention(
506
+ hidden_states,
507
+ attention_mask,
508
+ head_mask,
509
+ output_attentions=output_attentions,
510
+ past_key_value=self_attn_past_key_value,
511
+ )
512
+ attention_output = self_attention_outputs[0]
513
+
514
+ # if decoder, the last output is tuple of self-attn cache
515
+ if self.is_decoder:
516
+ outputs = self_attention_outputs[1:-1]
517
+ present_key_value = self_attention_outputs[-1]
518
+ else:
519
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
520
+
521
+ cross_attn_present_key_value = None
522
+ if self.is_decoder and encoder_hidden_states is not None:
523
+ if not hasattr(self, "crossattention"):
524
+ raise AttributeError(
525
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
526
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
527
+ )
528
+
529
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
530
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
531
+ cross_attention_outputs = self.crossattention(
532
+ attention_output,
533
+ attention_mask,
534
+ head_mask,
535
+ encoder_hidden_states,
536
+ encoder_attention_mask,
537
+ cross_attn_past_key_value,
538
+ output_attentions,
539
+ )
540
+ attention_output = cross_attention_outputs[0]
541
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
542
+
543
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
544
+ cross_attn_present_key_value = cross_attention_outputs[-1]
545
+ present_key_value = present_key_value + cross_attn_present_key_value
546
+
547
+ layer_output = self.feed_forward_chunk(attention_output)
548
+
549
+ outputs = (layer_output,) + outputs
550
+
551
+ # if decoder, return the attn key/values as the last output
552
+ if self.is_decoder:
553
+ outputs = outputs + (present_key_value,)
554
+ return outputs
555
+
556
+ def feed_forward_chunk(self, attention_output):
557
+ attention_output_ln = self.LayerNorm(attention_output)
558
+ intermediate_output = self.intermediate(attention_output_ln)
559
+ layer_output = self.output(intermediate_output, attention_output)
560
+ return layer_output
561
+
562
+
563
+ class EsmEncoder(nn.Module):
564
+ def __init__(self, config):
565
+ super().__init__()
566
+ self.config = config
567
+ self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)])
568
+ self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
569
+ self.gradient_checkpointing = False
570
+
571
+ def forward(
572
+ self,
573
+ hidden_states,
574
+ attention_mask=None,
575
+ head_mask=None,
576
+ encoder_hidden_states=None,
577
+ encoder_attention_mask=None,
578
+ past_key_values=None,
579
+ use_cache=None,
580
+ output_attentions=False,
581
+ output_hidden_states=False,
582
+ return_dict=True,
583
+ ):
584
+ if self.gradient_checkpointing and self.training:
585
+ if use_cache:
586
+ logger.warning_once(
587
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
588
+ "`use_cache=False`..."
589
+ )
590
+ use_cache = False
591
+ all_hidden_states = () if output_hidden_states else None
592
+ all_self_attentions = () if output_attentions else None
593
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
594
+
595
+ next_decoder_cache = () if use_cache else None
596
+ for i, layer_module in enumerate(self.layer):
597
+ if output_hidden_states:
598
+ all_hidden_states = all_hidden_states + (hidden_states,)
599
+
600
+ layer_head_mask = head_mask[i] if head_mask is not None else None
601
+ past_key_value = past_key_values[i] if past_key_values is not None else None
602
+
603
+ if self.gradient_checkpointing and self.training:
604
+ layer_outputs = self._gradient_checkpointing_func(
605
+ layer_module.__call__,
606
+ hidden_states,
607
+ attention_mask,
608
+ layer_head_mask,
609
+ encoder_hidden_states,
610
+ encoder_attention_mask,
611
+ past_key_value,
612
+ output_attentions,
613
+ )
614
+ else:
615
+ layer_outputs = layer_module(
616
+ hidden_states,
617
+ attention_mask,
618
+ layer_head_mask,
619
+ encoder_hidden_states,
620
+ encoder_attention_mask,
621
+ past_key_value,
622
+ output_attentions,
623
+ )
624
+
625
+ hidden_states = layer_outputs[0]
626
+ if use_cache:
627
+ next_decoder_cache = next_decoder_cache + (layer_outputs[-1],)
628
+ if output_attentions:
629
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
630
+ if self.config.add_cross_attention:
631
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
632
+
633
+ if self.emb_layer_norm_after:
634
+ hidden_states = self.emb_layer_norm_after(hidden_states)
635
+
636
+ if output_hidden_states:
637
+ all_hidden_states = all_hidden_states + (hidden_states,)
638
+
639
+ if not return_dict:
640
+ return tuple(
641
+ v
642
+ for v in [
643
+ hidden_states,
644
+ next_decoder_cache,
645
+ all_hidden_states,
646
+ all_self_attentions,
647
+ all_cross_attentions,
648
+ ]
649
+ if v is not None
650
+ )
651
+ return BaseModelOutputWithPastAndCrossAttentions(
652
+ last_hidden_state=hidden_states,
653
+ past_key_values=next_decoder_cache,
654
+ hidden_states=all_hidden_states,
655
+ attentions=all_self_attentions,
656
+ cross_attentions=all_cross_attentions,
657
+ )
658
+
659
+
660
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
661
+ class EsmPooler(nn.Module):
662
+ def __init__(self, config):
663
+ super().__init__()
664
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
665
+ self.activation = nn.Tanh()
666
+
667
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
668
+ # We "pool" the model by simply taking the hidden state corresponding
669
+ # to the first token.
670
+ first_token_tensor = hidden_states[:, 0]
671
+ pooled_output = self.dense(first_token_tensor)
672
+ pooled_output = self.activation(pooled_output)
673
+ return pooled_output
674
+
675
+
676
+ class EsmPreTrainedModel(PreTrainedModel):
677
+ """
678
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
679
+ models.
680
+ """
681
+
682
+ config_class = EsmConfig
683
+ base_model_prefix = "esm"
684
+ supports_gradient_checkpointing = True
685
+ _no_split_modules = ["EsmLayer", "EsmFoldTriangularSelfAttentionBlock", "EsmEmbeddings"]
686
+
687
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
688
+ def _init_weights(self, module):
689
+ """Initialize the weights"""
690
+ if isinstance(module, nn.Linear):
691
+ # Slightly different from the TF version which uses truncated_normal for initialization
692
+ # cf https://github.com/pytorch/pytorch/pull/5617
693
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
694
+ if module.bias is not None:
695
+ module.bias.data.zero_()
696
+ elif isinstance(module, nn.Embedding):
697
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
698
+ if module.padding_idx is not None:
699
+ module.weight.data[module.padding_idx].zero_()
700
+ elif isinstance(module, nn.LayerNorm):
701
+ module.bias.data.zero_()
702
+ module.weight.data.fill_(1.0)
703
+
704
+
705
+ ESM_START_DOCSTRING = r"""
706
+
707
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
708
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
709
+ etc.)
710
+
711
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
712
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
713
+ and behavior.
714
+
715
+ Parameters:
716
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
717
+ model. Initializing with a config file does not load the weights associated with the model, only the
718
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
719
+ """
720
+
721
+ ESM_INPUTS_DOCSTRING = r"""
722
+ Args:
723
+ input_ids (`torch.LongTensor` of shape `({0})`):
724
+ Indices of input sequence tokens in the vocabulary.
725
+
726
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
727
+ [`PreTrainedTokenizer.__call__`] for details.
728
+
729
+ [What are input IDs?](../glossary#input-ids)
730
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
731
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
732
+
733
+ - 1 for tokens that are **not masked**,
734
+ - 0 for tokens that are **masked**.
735
+
736
+ [What are attention masks?](../glossary#attention-mask)
737
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
738
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
739
+ config.max_position_embeddings - 1]`.
740
+
741
+ [What are position IDs?](../glossary#position-ids)
742
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
743
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
744
+
745
+ - 1 indicates the head is **not masked**,
746
+ - 0 indicates the head is **masked**.
747
+
748
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
749
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
750
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
751
+ model's internal embedding lookup matrix.
752
+ output_attentions (`bool`, *optional*):
753
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
754
+ tensors for more detail.
755
+ output_hidden_states (`bool`, *optional*):
756
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
757
+ more detail.
758
+ return_dict (`bool`, *optional*):
759
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
760
+ """
761
+
762
+
763
+ @add_start_docstrings(
764
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
765
+ ESM_START_DOCSTRING,
766
+ )
767
+ class EsmModel(EsmPreTrainedModel):
768
+ """
769
+
770
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
771
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
772
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
773
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
774
+
775
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
776
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
777
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
778
+ """
779
+
780
+ def __init__(self, config, add_pooling_layer=True):
781
+ super().__init__(config)
782
+ self.config = config
783
+
784
+ self.embeddings = EsmEmbeddings(config)
785
+ self.encoder = EsmEncoder(config)
786
+
787
+ self.pooler = EsmPooler(config) if add_pooling_layer else None
788
+
789
+ self.contact_head = EsmContactPredictionHead(
790
+ in_features=config.num_hidden_layers * config.num_attention_heads, bias=True
791
+ )
792
+
793
+ # Initialize weights and apply final processing
794
+ self.post_init()
795
+
796
+ def get_input_embeddings(self):
797
+ return self.embeddings.word_embeddings
798
+
799
+ def set_input_embeddings(self, value):
800
+ self.embeddings.word_embeddings = value
801
+
802
+ def _prune_heads(self, heads_to_prune):
803
+ """
804
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
805
+ class PreTrainedModel
806
+ """
807
+ for layer, heads in heads_to_prune.items():
808
+ self.encoder.layer[layer].attention.prune_heads(heads)
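# Editorial usage sketch (hedged; the checkpoint name is only an example):
#     model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model.prune_heads({0: [0, 2], 3: [1]})  # drop heads 0 and 2 of layer 0 and head 1 of layer 3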
809
+
810
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
811
+ @add_code_sample_docstrings(
812
+ checkpoint=_CHECKPOINT_FOR_DOC,
813
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
814
+ config_class=_CONFIG_FOR_DOC,
815
+ )
816
+ def forward(
817
+ self,
818
+ input_ids: Optional[torch.Tensor] = None,
819
+ attention_mask: Optional[torch.Tensor] = None,
820
+ position_ids: Optional[torch.Tensor] = None,
821
+ head_mask: Optional[torch.Tensor] = None,
822
+ inputs_embeds: Optional[torch.Tensor] = None,
823
+ encoder_hidden_states: Optional[torch.Tensor] = None,
824
+ encoder_attention_mask: Optional[torch.Tensor] = None,
825
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
826
+ use_cache: Optional[bool] = None,
827
+ output_attentions: Optional[bool] = None,
828
+ output_hidden_states: Optional[bool] = None,
829
+ return_dict: Optional[bool] = None,
830
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
831
+ r"""
832
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
833
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
834
+ the model is configured as a decoder.
835
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
836
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
837
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
838
+
839
+ - 1 for tokens that are **not masked**,
840
+ - 0 for tokens that are **masked**.
841
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
842
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
843
+
844
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
845
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
846
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
847
+ use_cache (`bool`, *optional*):
848
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
849
+ `past_key_values`).
850
+ """
851
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
852
+ output_hidden_states = (
853
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
854
+ )
855
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
856
+
857
+ if self.config.is_decoder:
858
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
859
+ else:
860
+ use_cache = False
861
+
862
+ if input_ids is not None and inputs_embeds is not None:
863
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
864
+ elif input_ids is not None:
865
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
866
+ input_shape = input_ids.size()
867
+ elif inputs_embeds is not None:
868
+ input_shape = inputs_embeds.size()[:-1]
869
+ else:
870
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
871
+
872
+ batch_size, seq_length = input_shape
873
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
874
+
875
+ # past_key_values_length
876
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
877
+
878
+ if attention_mask is None:
879
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
880
+
881
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
882
+ # ourselves in which case we just need to make it broadcastable to all heads.
883
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
884
+
885
+ # If a 2D or 3D attention mask is provided for the cross-attention
886
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
887
+ if self.config.is_decoder and encoder_hidden_states is not None:
888
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
889
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
890
+ if encoder_attention_mask is None:
891
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
892
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
893
+ else:
894
+ encoder_extended_attention_mask = None
895
+
896
+ # Prepare head mask if needed
897
+ # 1.0 in head_mask indicate we keep the head
898
+ # attention_probs has shape bsz x n_heads x N x N
899
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
900
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
901
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
902
+
903
+ embedding_output = self.embeddings(
904
+ input_ids=input_ids,
905
+ position_ids=position_ids,
906
+ attention_mask=attention_mask,
907
+ inputs_embeds=inputs_embeds,
908
+ past_key_values_length=past_key_values_length,
909
+ )
910
+ encoder_outputs = self.encoder(
911
+ embedding_output,
912
+ attention_mask=extended_attention_mask,
913
+ head_mask=head_mask,
914
+ encoder_hidden_states=encoder_hidden_states,
915
+ encoder_attention_mask=encoder_extended_attention_mask,
916
+ past_key_values=past_key_values,
917
+ use_cache=use_cache,
918
+ output_attentions=output_attentions,
919
+ output_hidden_states=output_hidden_states,
920
+ return_dict=return_dict,
921
+ )
922
+ sequence_output = encoder_outputs[0]
923
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
924
+
925
+ if not return_dict:
926
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
927
+
928
+ return BaseModelOutputWithPoolingAndCrossAttentions(
929
+ last_hidden_state=sequence_output,
930
+ pooler_output=pooled_output,
931
+ past_key_values=encoder_outputs.past_key_values,
932
+ hidden_states=encoder_outputs.hidden_states,
933
+ attentions=encoder_outputs.attentions,
934
+ cross_attentions=encoder_outputs.cross_attentions,
935
+ )
936
+
937
+ def predict_contacts(self, tokens, attention_mask):
938
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
939
+ attns = torch.stack(attns, dim=1) # Matches the original model layout
940
+ # In the original model, attentions for padding tokens are completely zeroed out.
941
+ # This makes no difference most of the time because the other tokens won't attend to them,
942
+ # but it does for the contact prediction task, which takes attentions as input,
943
+ # so we have to mimic that here.
944
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3)
945
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4)
946
+ return self.contact_head(tokens, attns)
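# Editorial usage sketch (hedged; the checkpoint name is only an example):
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     inputs = tokenizer("MKTAYIAKQR", return_tensors="pt")
#     contacts = model.predict_contacts(inputs["input_ids"], inputs["attention_mask"])
#     # contacts: (batch, residues, residues) contact probabilities; cls/eos positions are stripped by the head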
947
+
948
+
949
+ @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
950
+ class EsmForMaskedLM(EsmPreTrainedModel):
951
+ _tied_weights_keys = ["lm_head.decoder.weight"]
952
+
953
+ def __init__(self, config):
954
+ super().__init__(config)
955
+
956
+ if config.is_decoder:
957
+ logger.warning(
958
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
959
+ "bi-directional self-attention."
960
+ )
961
+
962
+ self.esm = EsmModel(config, add_pooling_layer=False)
963
+ self.lm_head = EsmLMHead(config)
964
+
965
+ self.init_weights()
966
+
967
+ def get_output_embeddings(self):
968
+ return self.lm_head.decoder
969
+
970
+ def set_output_embeddings(self, new_embeddings):
971
+ self.lm_head.decoder = new_embeddings
972
+
973
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
974
+ @add_code_sample_docstrings(
975
+ checkpoint=_CHECKPOINT_FOR_DOC,
976
+ output_type=MaskedLMOutput,
977
+ config_class=_CONFIG_FOR_DOC,
978
+ mask="<mask>",
979
+ )
980
+ def forward(
981
+ self,
982
+ input_ids: Optional[torch.LongTensor] = None,
983
+ attention_mask: Optional[torch.Tensor] = None,
984
+ position_ids: Optional[torch.LongTensor] = None,
985
+ head_mask: Optional[torch.Tensor] = None,
986
+ inputs_embeds: Optional[torch.FloatTensor] = None,
987
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
988
+ encoder_attention_mask: Optional[torch.Tensor] = None,
989
+ labels: Optional[torch.LongTensor] = None,
990
+ output_attentions: Optional[bool] = None,
991
+ output_hidden_states: Optional[bool] = None,
992
+ return_dict: Optional[bool] = None,
993
+ ) -> Union[Tuple, MaskedLMOutput]:
994
+ r"""
995
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
996
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
997
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
998
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
999
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1000
+ Used to hide legacy arguments that have been deprecated.
1001
+ """
1002
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1003
+
1004
+ outputs = self.esm(
1005
+ input_ids,
1006
+ attention_mask=attention_mask,
1007
+ position_ids=position_ids,
1008
+ head_mask=head_mask,
1009
+ inputs_embeds=inputs_embeds,
1010
+ encoder_hidden_states=encoder_hidden_states,
1011
+ encoder_attention_mask=encoder_attention_mask,
1012
+ output_attentions=output_attentions,
1013
+ output_hidden_states=output_hidden_states,
1014
+ return_dict=return_dict,
1015
+ )
1016
+ sequence_output = outputs[0]
1017
+ prediction_scores = self.lm_head(sequence_output)
1018
+
1019
+ masked_lm_loss = None
1020
+ if labels is not None:
1021
+ loss_fct = CrossEntropyLoss()
1022
+
1023
+ labels = labels.to(prediction_scores.device)
1024
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1025
+
1026
+ if not return_dict:
1027
+ output = (prediction_scores,) + outputs[2:]
1028
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1029
+
1030
+ return MaskedLMOutput(
1031
+ loss=masked_lm_loss,
1032
+ logits=prediction_scores,
1033
+ hidden_states=outputs.hidden_states,
1034
+ attentions=outputs.attentions,
1035
+ )
1036
+
1037
+ def predict_contacts(self, tokens, attention_mask):
1038
+ return self.esm.predict_contacts(tokens, attention_mask=attention_mask)
1039
+
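# Editorial usage sketch for the masked-LM head above (hedged; the checkpoint name is only an example):
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     inputs = tokenizer("MKT<mask>YIAKQR", return_tensors="pt")
#     logits = model(**inputs).logits  # (batch, seq_len, vocab_size)
#     mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
#     print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))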
1040
+
1041
+ class EsmLMHead(nn.Module):
1042
+ """ESM Head for masked language modeling."""
1043
+
1044
+ def __init__(self, config):
1045
+ super().__init__()
1046
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1047
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1048
+
1049
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1050
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1051
+
1052
+ def forward(self, features, **kwargs):
1053
+ x = self.dense(features)
1054
+ x = gelu(x)
1055
+ x = self.layer_norm(x)
1056
+
1057
+ # project back to size of vocabulary with bias
1058
+ x = self.decoder(x) + self.bias
1059
+ return x
1060
+
1061
+
1062
+ @add_start_docstrings(
1063
+ """
1064
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1065
+ output) e.g. for GLUE tasks.
1066
+ """,
1067
+ ESM_START_DOCSTRING,
1068
+ )
1069
+ class EsmForSequenceClassification(EsmPreTrainedModel):
1070
+ def __init__(self, config):
1071
+ super().__init__(config)
1072
+ self.num_labels = config.num_labels
1073
+ self.config = config
1074
+
1075
+ self.esm = EsmModel(config, add_pooling_layer=False)
1076
+ self.classifier = EsmClassificationHead(config)
1077
+
1078
+ self.init_weights()
1079
+
1080
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1081
+ @add_code_sample_docstrings(
1082
+ checkpoint=_CHECKPOINT_FOR_DOC,
1083
+ output_type=SequenceClassifierOutput,
1084
+ config_class=_CONFIG_FOR_DOC,
1085
+ )
1086
+ def forward(
1087
+ self,
1088
+ input_ids: Optional[torch.LongTensor] = None,
1089
+ attention_mask: Optional[torch.Tensor] = None,
1090
+ position_ids: Optional[torch.LongTensor] = None,
1091
+ head_mask: Optional[torch.Tensor] = None,
1092
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1093
+ labels: Optional[torch.LongTensor] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple, SequenceClassifierOutput]:
1098
+ r"""
1099
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1100
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1101
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1102
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1103
+ """
1104
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1105
+
1106
+ outputs = self.esm(
1107
+ input_ids,
1108
+ attention_mask=attention_mask,
1109
+ position_ids=position_ids,
1110
+ head_mask=head_mask,
1111
+ inputs_embeds=inputs_embeds,
1112
+ output_attentions=output_attentions,
1113
+ output_hidden_states=output_hidden_states,
1114
+ return_dict=return_dict,
1115
+ )
1116
+ sequence_output = outputs[0]
1117
+ logits = self.classifier(sequence_output)
1118
+
1119
+ loss = None
1120
+ if labels is not None:
1121
+ labels = labels.to(logits.device)
1122
+
1123
+ if self.config.problem_type is None:
1124
+ if self.num_labels == 1:
1125
+ self.config.problem_type = "regression"
1126
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1127
+ self.config.problem_type = "single_label_classification"
1128
+ else:
1129
+ self.config.problem_type = "multi_label_classification"
1130
+
1131
+ if self.config.problem_type == "regression":
1132
+ loss_fct = MSELoss()
1133
+ if self.num_labels == 1:
1134
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1135
+ else:
1136
+ loss = loss_fct(logits, labels)
1137
+ elif self.config.problem_type == "single_label_classification":
1138
+ loss_fct = CrossEntropyLoss()
1139
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1140
+ elif self.config.problem_type == "multi_label_classification":
1141
+ loss_fct = BCEWithLogitsLoss()
1142
+ loss = loss_fct(logits, labels)
1143
+
1144
+ if not return_dict:
1145
+ output = (logits,) + outputs[2:]
1146
+ return ((loss,) + output) if loss is not None else output
1147
+
1148
+ return SequenceClassifierOutput(
1149
+ loss=loss,
1150
+ logits=logits,
1151
+ hidden_states=outputs.hidden_states,
1152
+ attentions=outputs.attentions,
1153
+ )
1154
+
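# Editorial summary of the loss selection above (hedged):
#     num_labels == 1                    -> "regression": MSELoss on float labels of shape (batch,)
#     num_labels > 1, integer labels     -> "single_label_classification": CrossEntropyLoss on (batch,) class ids
#     num_labels > 1, float labels       -> "multi_label_classification": BCEWithLogitsLoss on (batch, num_labels) multi-hot targets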
1155
+
1156
+ @add_start_docstrings(
1157
+ """
1158
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1159
+ Named-Entity-Recognition (NER) tasks.
1160
+ """,
1161
+ ESM_START_DOCSTRING,
1162
+ )
1163
+ class EsmForTokenClassification(EsmPreTrainedModel):
1164
+ def __init__(self, config):
1165
+ super().__init__(config)
1166
+ self.num_labels = config.num_labels
1167
+
1168
+ self.esm = EsmModel(config, add_pooling_layer=False)
1169
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1170
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1171
+
1172
+ self.init_weights()
1173
+
1174
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1175
+ @add_code_sample_docstrings(
1176
+ checkpoint=_CHECKPOINT_FOR_DOC,
1177
+ output_type=TokenClassifierOutput,
1178
+ config_class=_CONFIG_FOR_DOC,
1179
+ )
1180
+ def forward(
1181
+ self,
1182
+ input_ids: Optional[torch.LongTensor] = None,
1183
+ attention_mask: Optional[torch.Tensor] = None,
1184
+ position_ids: Optional[torch.LongTensor] = None,
1185
+ head_mask: Optional[torch.Tensor] = None,
1186
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1187
+ labels: Optional[torch.LongTensor] = None,
1188
+ output_attentions: Optional[bool] = None,
1189
+ output_hidden_states: Optional[bool] = None,
1190
+ return_dict: Optional[bool] = None,
1191
+ ) -> Union[Tuple, TokenClassifierOutput]:
1192
+ r"""
1193
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1194
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1195
+ """
1196
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1197
+
1198
+ outputs = self.esm(
1199
+ input_ids,
1200
+ attention_mask=attention_mask,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ output_attentions=output_attentions,
1205
+ output_hidden_states=output_hidden_states,
1206
+ return_dict=return_dict,
1207
+ )
1208
+
1209
+ sequence_output = outputs[0]
1210
+
1211
+ sequence_output = self.dropout(sequence_output)
1212
+ logits = self.classifier(sequence_output)
1213
+
1214
+ loss = None
1215
+ if labels is not None:
1216
+ loss_fct = CrossEntropyLoss()
1217
+
1218
+ labels = labels.to(logits.device)
1219
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1220
+
1221
+ if not return_dict:
1222
+ output = (logits,) + outputs[2:]
1223
+ return ((loss,) + output) if loss is not None else output
1224
+
1225
+ return TokenClassifierOutput(
1226
+ loss=loss,
1227
+ logits=logits,
1228
+ hidden_states=outputs.hidden_states,
1229
+ attentions=outputs.attentions,
1230
+ )
1231
+
1232
+
1233
+ class EsmClassificationHead(nn.Module):
1234
+ """Head for sentence-level classification tasks."""
1235
+
1236
+ def __init__(self, config):
1237
+ super().__init__()
1238
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1239
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1240
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1241
+
1242
+ def forward(self, features, **kwargs):
1243
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1244
+ x = self.dropout(x)
1245
+ x = self.dense(x)
1246
+ x = torch.tanh(x)
1247
+ x = self.dropout(x)
1248
+ x = self.out_proj(x)
1249
+ return x
1250
+
1251
+
1252
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1253
+ """
1254
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1255
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1256
+
1257
+ Args:
1258
+ input_ids: torch.Tensor
1259
+
1260
+ Returns: torch.Tensor
1261
+ """
1262
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1263
+ mask = input_ids.ne(padding_idx).int()
1264
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
1265
+ return incremental_indices.long() + padding_idx
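# Editorial worked example (illustrative): with padding_idx = 1 and input_ids = [[0, 5, 8, 1, 1]]:
#     mask                 = [[1, 1, 1, 0, 0]]
#     cumsum(mask) * mask  = [[1, 2, 3, 0, 0]]
#     result + padding_idx = [[2, 3, 4, 1, 1]]
# so real tokens count up from padding_idx + 1 while padding positions stay at padding_idx.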
venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py ADDED
@@ -0,0 +1,2322 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import math
16
+ import sys
17
+ from dataclasses import dataclass
18
+ from functools import partial
19
+ from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.nn as nn
24
+ from torch.nn import LayerNorm
25
+
26
+ from ...integrations.deepspeed import is_deepspeed_available
27
+ from ...modeling_outputs import ModelOutput
28
+ from ...utils import (
29
+ ContextManagers,
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ is_scipy_available,
33
+ logging,
34
+ replace_return_docstrings,
35
+ )
36
+ from .configuration_esm import EsmConfig
37
+ from .modeling_esm import ESM_START_DOCSTRING, EsmModel, EsmPreTrainedModel
38
+ from .openfold_utils import (
39
+ OFProtein,
40
+ Rigid,
41
+ Rotation,
42
+ atom14_to_atom37,
43
+ chunk_layer,
44
+ compute_predicted_aligned_error,
45
+ compute_tm,
46
+ frames_and_literature_positions_to_atom14_pos,
47
+ make_atom14_masks,
48
+ residue_constants,
49
+ to_pdb,
50
+ torsion_angles_to_frames,
51
+ )
52
+
53
+
54
+ logger = logging.get_logger(__name__)
55
+ _CHECKPOINT_FOR_DOC = "facebook/esmfold_v1"
56
+ _CONFIG_FOR_DOC = "EsmConfig"
57
+
58
+
59
+ @dataclass
60
+ class EsmForProteinFoldingOutput(ModelOutput):
61
+ """
62
+ Output type of [`EsmForProteinFolding`].
63
+
64
+ Args:
65
+ frames (`torch.FloatTensor`):
66
+ Output frames.
67
+ sidechain_frames (`torch.FloatTensor`):
68
+ Output sidechain frames.
69
+ unnormalized_angles (`torch.FloatTensor`):
70
+ Predicted unnormalized backbone and side chain torsion angles.
71
+ angles (`torch.FloatTensor`):
72
+ Predicted backbone and side chain torsion angles.
73
+ positions (`torch.FloatTensor`):
74
+ Predicted positions of the backbone and side chain atoms.
75
+ states (`torch.FloatTensor`):
76
+ Hidden states from the protein folding trunk.
77
+ s_s (`torch.FloatTensor`):
78
+ Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.
79
+ s_z (`torch.FloatTensor`):
80
+ Pairwise residue embeddings.
81
+ distogram_logits (`torch.FloatTensor`):
82
+ Input logits to the distogram used to compute residue distances.
83
+ lm_logits (`torch.FloatTensor`):
84
+ Logits output by the ESM-2 protein language model stem.
85
+ aatype (`torch.FloatTensor`):
86
+ Input amino acids (AlphaFold2 indices).
87
+ atom14_atom_exists (`torch.FloatTensor`):
88
+ Whether each atom exists in the atom14 representation.
89
+ residx_atom14_to_atom37 (`torch.FloatTensor`):
90
+ Mapping between atoms in the atom14 and atom37 representations.
91
+ residx_atom37_to_atom14 (`torch.FloatTensor`):
92
+ Mapping between atoms in the atom37 and atom14 representations.
93
+ atom37_atom_exists (`torch.FloatTensor`):
94
+ Whether each atom exists in the atom37 representation.
95
+ residue_index (`torch.FloatTensor`):
96
+ The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be
97
+ a sequence of integers from 0 to `sequence_length`.
98
+ lddt_head (`torch.FloatTensor`):
99
+ Raw outputs from the lddt head used to compute plddt.
100
+ plddt (`torch.FloatTensor`):
101
+ Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is
102
+ uncertain, or where the protein structure is disordered.
103
+ ptm_logits (`torch.FloatTensor`):
104
+ Raw logits used for computing ptm.
105
+ ptm (`torch.FloatTensor`):
106
+ TM-score output representing the model's high-level confidence in the overall structure.
107
+ aligned_confidence_probs (`torch.FloatTensor`):
108
+ Per-residue confidence scores for the aligned structure.
109
+ predicted_aligned_error (`torch.FloatTensor`):
110
+ Predicted error between the model's prediction and the ground truth.
111
+ max_predicted_aligned_error (`torch.FloatTensor`):
112
+ Per-sample maximum predicted error.
113
+ """
114
+
115
+ frames: torch.FloatTensor = None
116
+ sidechain_frames: torch.FloatTensor = None
117
+ unnormalized_angles: torch.FloatTensor = None
118
+ angles: torch.FloatTensor = None
119
+ positions: torch.FloatTensor = None
120
+ states: torch.FloatTensor = None
121
+ s_s: torch.FloatTensor = None
122
+ s_z: torch.FloatTensor = None
123
+ distogram_logits: torch.FloatTensor = None
124
+ lm_logits: torch.FloatTensor = None
125
+ aatype: torch.FloatTensor = None
126
+ atom14_atom_exists: torch.FloatTensor = None
127
+ residx_atom14_to_atom37: torch.FloatTensor = None
128
+ residx_atom37_to_atom14: torch.FloatTensor = None
129
+ atom37_atom_exists: torch.FloatTensor = None
130
+ residue_index: torch.FloatTensor = None
131
+ lddt_head: torch.FloatTensor = None
132
+ plddt: torch.FloatTensor = None
133
+ ptm_logits: torch.FloatTensor = None
134
+ ptm: torch.FloatTensor = None
135
+ aligned_confidence_probs: torch.FloatTensor = None
136
+ predicted_aligned_error: torch.FloatTensor = None
137
+ max_predicted_aligned_error: torch.FloatTensor = None
138
+
139
+
140
+ ESMFOLD_INPUTS_DOCSTRING = r"""
141
+ Args:
142
+ input_ids (`torch.LongTensor` of shape `({0})`):
143
+ Indices of input sequence tokens in the vocabulary.
144
+
145
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
146
+ [`PreTrainedTokenizer.__call__`] for details.
147
+
148
+ [What are input IDs?](../glossary#input-ids)
149
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
150
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
151
+
152
+ - 1 for tokens that are **not masked**,
153
+ - 0 for tokens that are **masked**.
154
+
155
+ [What are attention masks?](../glossary#attention-mask)
156
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
157
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
158
+ config.max_position_embeddings - 1]`.
159
+
160
+ [What are position IDs?](../glossary#position-ids)
161
+ masking_pattern (`torch.LongTensor` of shape `({0})`, *optional*):
162
+ Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
163
+ num_recycles (`int`, *optional*, defaults to `None`):
164
+ Number of times to recycle the input sequence. If `None`, defaults to `config.max_recycles`. "Recycling"
165
+ consists of passing the output of the folding trunk back in as input to the trunk. During training, the
166
+ number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
167
+ after each recycle. During inference, num_recycles should be set to the highest value that the model was
168
+ trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
169
+ used.
170
+ """
171
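+ # Editorial usage sketch for the `num_recycles` argument documented above (not part of the
+ # original commit; the checkpoint name and the example sequence are illustrative only):
+ #
+ #     from transformers import AutoTokenizer, EsmForProteinFolding
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
+ #     model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
+ #     inputs = tokenizer(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"], return_tensors="pt", add_special_tokens=False)
+ #     outputs = model(**inputs, num_recycles=4)  # None falls back to the configured maximum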
+
172
+
173
+ def is_fp16_enabled():
174
+ # True only when autocast is enabled and the GPU autocast dtype is float16
175
+ fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16
176
+ fp16_enabled = fp16_enabled and torch.is_autocast_enabled()
177
+
178
+ return fp16_enabled
179
+
180
+
181
+ def is_deepspeed_initialized():
182
+ if is_deepspeed_available():
183
+ return False
184
+ else:
185
+ try:
186
+ import deepspeed
187
+
188
+ # This is not available in all DeepSpeed versions.
189
+ return deepspeed.utils.is_initialized()
190
+ except Exception:
191
+ return False
192
+
193
+
194
+ def collate_dense_tensors(samples: List[torch.Tensor], pad_v: float = 0) -> torch.Tensor:
195
+ """
196
+ Takes a list of tensors with the following dimensions:
197
+ [(d_11, ..., d_1K),
198
+ (d_21, ..., d_2K), ..., (d_N1, ..., d_NK)]
199
+ and stacks and pads them into a single tensor of shape:
200
+ (N, max_i d_i1, ..., max_i d_iK)
201
+ """
202
+ if len(samples) == 0:
203
+ return torch.Tensor()
204
+ if len({x.dim() for x in samples}) != 1:
205
+ raise RuntimeError(f"Samples has varying dimensions: {[x.dim() for x in samples]}")
206
+ (device,) = tuple({x.device for x in samples}) # assumes all on same device
207
+ max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])]
208
+ result = torch.empty(len(samples), *max_shape, dtype=samples[0].dtype, device=device)
209
+ result.fill_(pad_v)
210
+ for i in range(len(samples)):
211
+ result_i = result[i]
212
+ t = samples[i]
213
+ result_i[tuple(slice(0, k) for k in t.shape)] = t
214
+ return result
215
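+ # Illustrative use of collate_dense_tensors (editorial note, not from the original commit;
+ # the shapes below are arbitrary): two ragged tensors are stacked into one zero-padded batch.
+ #
+ #     a = torch.ones(3, 5)
+ #     b = torch.ones(2, 7)
+ #     batch = collate_dense_tensors([a, b], pad_v=0)
+ #     batch.shape  # torch.Size([2, 3, 7]); entries outside each original tensor are 0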
+
216
+
217
+ def flatten_final_dims(t: torch.Tensor, no_dims: int):
218
+ return t.reshape(t.shape[:-no_dims] + (-1,))
219
+
220
+
221
+ def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
222
+ zero_index = -1 * len(inds)
223
+ first_inds = list(range(len(tensor.shape[:zero_index])))
224
+ return tensor.permute(first_inds + [zero_index + i for i in inds])
225
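+ # Quick shape sketch for the two reshaping helpers above (editorial example; the 4-D shape is
+ # an arbitrary assumption). Given t of shape [B, N, H, C]:
+ #
+ #     flatten_final_dims(t, 2)          # -> [B, N, H * C]
+ #     permute_final_dims(t, (1, 2, 0))  # -> [B, H, C, N]  (only the last three dims move)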
+
226
+
227
+ def dict_multimap(fn, dicts):
228
+ first = dicts[0]
229
+ new_dict = {}
230
+ for k, v in first.items():
231
+ all_v = [d[k] for d in dicts]
232
+ if isinstance(v, dict):
233
+ new_dict[k] = dict_multimap(fn, all_v)
234
+ else:
235
+ new_dict[k] = fn(all_v)
236
+
237
+ return new_dict
238
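+ # dict_multimap is used near the end of this file as `dict_multimap(torch.stack, outputs)`:
+ # it applies `fn` leaf-wise across a list of nested dicts with identical structure. Toy
+ # illustration (editorial, values are made up):
+ #
+ #     d1 = {"angles": torch.zeros(2), "aux": {"x": torch.zeros(3)}}
+ #     d2 = {"angles": torch.ones(2), "aux": {"x": torch.ones(3)}}
+ #     out = dict_multimap(torch.stack, [d1, d2])
+ #     out["angles"].shape    # torch.Size([2, 2])
+ #     out["aux"]["x"].shape  # torch.Size([2, 3])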
+
239
+
240
+ def trunc_normal_init_(weights, scale=1.0, fan="fan_in"):
241
+ shape = weights.shape
242
+ scale = scale / max(1, shape[1])
243
+
244
+ if not is_scipy_available():
245
+ logger.warning(
246
+ "This init requires scipy, but scipy was not found, default to an approximation that might not be"
247
+ " equivalent."
248
+ )
249
+ std = math.sqrt(scale)
250
+ torch.nn.init.normal_(weights, std=std).clamp(min=0.0, max=2.0 * std)
251
+
252
+ else:
253
+ from scipy.stats import truncnorm
254
+
255
+ std = math.sqrt(scale) / truncnorm.std(a=-2, b=2, loc=0, scale=1)
256
+ samples = truncnorm.rvs(a=-2, b=2, loc=0, scale=std, size=weights.numel())
257
+ samples = np.reshape(samples, shape)
258
+ weights.copy_(torch.tensor(samples, device=weights.device))
259
+
260
+
261
+ def ipa_point_weights_init_(weights):
262
+ with torch.no_grad():
263
+ softplus_inverse_1 = 0.541324854612918
264
+ weights.fill_(softplus_inverse_1)
265
+
266
+
267
+ class EsmFoldLinear(nn.Linear):
268
+ """
269
+ A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
270
+
271
+ Implements the initializers in section 1.11.4 of the AlphaFold2 supplement, plus some additional ones found in the code.
272
+ """
273
+
274
+ def __init__(
275
+ self,
276
+ in_dim: int,
277
+ out_dim: int,
278
+ bias: bool = True,
279
+ init: str = "default",
280
+ init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,
281
+ ):
282
+ """
283
+ Args:
284
+ in_dim:
285
+ The final dimension of inputs to the layer
286
+ out_dim:
287
+ The final dimension of layer outputs
288
+ bias:
289
+ Whether to learn an additive bias. True by default
290
+ init:
291
+ The initializer to use. Choose from:
292
+
293
+ "default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
294
+ distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
295
+ Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
296
+
297
+ Overridden by init_fn if the latter is not None.
298
+ init_fn:
299
+ A custom initializer taking weight and bias as inputs. Overrides init if not None.
300
+ """
301
+ super().__init__(in_dim, out_dim, bias=bias)
302
+
303
+ if bias:
304
+ with torch.no_grad():
305
+ self.bias.fill_(0)
306
+ self.init = init
307
+ self.init_fn = init_fn
308
+
309
+ if init not in ["default", "relu", "glorot", "gating", "normal", "final"]:
310
+ raise ValueError("Invalid init string.")
311
+
312
+
313
+ class EsmFoldLayerNorm(nn.Module):
314
+ def __init__(self, c_in, eps=1e-5):
315
+ super().__init__()
316
+
317
+ self.c_in = (c_in,)
318
+ self.eps = eps
319
+
320
+ self.weight = nn.Parameter(torch.ones(c_in))
321
+ self.bias = nn.Parameter(torch.zeros(c_in))
322
+
323
+ def forward(self, x):
324
+ d = x.dtype
325
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
326
+ with torch.cuda.amp.autocast(enabled=False):
327
+ out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps)
328
+ else:
329
+ out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps)
330
+
331
+ return out
332
+
333
+
334
+ @torch.jit.ignore
335
+ def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor:
336
+ """
337
+ Softmax, but without automatic casting to fp32 when the input is of type bfloat16
338
+ """
339
+ d = t.dtype
340
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
341
+ with torch.cuda.amp.autocast(enabled=False):
342
+ s = torch.nn.functional.softmax(t, dim=dim)
343
+ else:
344
+ s = torch.nn.functional.softmax(t, dim=dim)
345
+
346
+ return s
347
+
348
+
349
+ class EsmFoldAttention(nn.Module):
350
+ """
351
+ Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
352
+ """
353
+
354
+ def __init__(
355
+ self,
356
+ c_q: int,
357
+ c_k: int,
358
+ c_v: int,
359
+ c_hidden: int,
360
+ no_heads: int,
361
+ gating: bool = True,
362
+ ):
363
+ """
364
+ Args:
365
+ c_q:
366
+ Input dimension of query data
367
+ c_k:
368
+ Input dimension of key data
369
+ c_v:
370
+ Input dimension of value data
371
+ c_hidden:
372
+ Per-head hidden dimension
373
+ no_heads:
374
+ Number of attention heads
375
+ gating:
376
+ Whether the output should be gated using query data
377
+ """
378
+ super().__init__()
379
+
380
+ self.c_q = c_q
381
+ self.c_k = c_k
382
+ self.c_v = c_v
383
+ self.c_hidden = c_hidden
384
+ self.no_heads = no_heads
385
+ self.gating = gating
386
+
387
+ # DISCREPANCY: c_hidden is not the per-head channel dimension, as
388
+ # stated in the supplement, but the overall channel dimension.
389
+
390
+ self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot")
391
+ self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot")
392
+ self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot")
393
+ self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init="final")
394
+
395
+ self.linear_g = None
396
+ if self.gating:
397
+ self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init="gating")
398
+
399
+ self.sigmoid = nn.Sigmoid()
400
+
401
+ def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
402
+ # [*, Q/K/V, H * C_hidden]
403
+ q = self.linear_q(q_x)
404
+ k = self.linear_k(kv_x)
405
+ v = self.linear_v(kv_x)
406
+
407
+ # [*, Q/K, H, C_hidden]
408
+ q = q.view(q.shape[:-1] + (self.no_heads, -1))
409
+ k = k.view(k.shape[:-1] + (self.no_heads, -1))
410
+ v = v.view(v.shape[:-1] + (self.no_heads, -1))
411
+
412
+ # [*, H, Q/K, C_hidden]
413
+ q = q.transpose(-2, -3)
414
+ k = k.transpose(-2, -3)
415
+ v = v.transpose(-2, -3)
416
+
417
+ q /= math.sqrt(self.c_hidden)
418
+
419
+ return q, k, v
420
+
421
+ def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
422
+ if self.linear_g is not None:
423
+ g = self.sigmoid(self.linear_g(q_x))
424
+
425
+ # [*, Q, H, C_hidden]
426
+ g = g.view(g.shape[:-1] + (self.no_heads, -1))
427
+ o = o * g
428
+
429
+ # [*, Q, H * C_hidden]
430
+ o = flatten_final_dims(o, 2)
431
+
432
+ # [*, Q, C_q]
433
+ o = self.linear_o(o)
434
+
435
+ return o
436
+
437
+ def forward(
438
+ self,
439
+ q_x: torch.Tensor,
440
+ kv_x: torch.Tensor,
441
+ biases: Optional[List[torch.Tensor]] = None,
442
+ use_memory_efficient_kernel: bool = False,
443
+ use_lma: bool = False,
444
+ lma_q_chunk_size: int = 1024,
445
+ lma_kv_chunk_size: int = 4096,
446
+ use_flash: bool = False,
447
+ flash_mask: Optional[torch.Tensor] = None,
448
+ ) -> torch.Tensor:
449
+ """
450
+ Args:
451
+ q_x:
452
+ [*, Q, C_q] query data
453
+ kv_x:
454
+ [*, K, C_k] key data
455
+ biases:
456
+ List of biases that broadcast to [*, H, Q, K]
457
+ use_memory_efficient_kernel:
458
+ Whether to use a custom memory-efficient attention kernel. This should be the default choice for most.
459
+ If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
460
+ use_lma:
461
+ Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
462
+ stock PyTorch implementation is used instead
463
+ lma_q_chunk_size:
464
+ Query chunk size (for LMA)
465
+ lma_kv_chunk_size:
466
+ Key/Value chunk size (for LMA)
467
+ Returns:
468
+ [*, Q, C_q] attention update
469
+ """
470
+ if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None):
471
+ raise ValueError("If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided")
472
+
473
+ if use_flash and biases is not None:
474
+ raise ValueError("use_flash is incompatible with the bias option. For masking, use flash_mask instead")
475
+
476
+ attn_options = [use_memory_efficient_kernel, use_lma, use_flash]
477
+ if sum(attn_options) > 1:
478
+ raise ValueError("Choose at most one alternative attention algorithm")
479
+
480
+ if biases is None:
481
+ biases = []
482
+
483
+ # [*, H, Q/K, C_hidden]
484
+ query, key, value = self._prep_qkv(q_x, kv_x)
485
+ key = permute_final_dims(key, (1, 0))
486
+
487
+ # [*, H, Q, K]
488
+ output = torch.matmul(query, key)
489
+ for b in biases:
490
+ output += b
491
+ output = softmax_no_cast(output, -1)
492
+
493
+ # [*, H, Q, C_hidden]
494
+ output = torch.matmul(output, value)
495
+ output = output.transpose(-2, -3)
496
+ output = self._wrap_up(output, q_x)
497
+
498
+ return output
499
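+ # Minimal shape sketch for EsmFoldAttention.forward (editorial; all sizes are arbitrary
+ # assumptions). Any bias must broadcast to [*, H, Q, K]:
+ #
+ #     attn = EsmFoldAttention(c_q=32, c_k=32, c_v=32, c_hidden=16, no_heads=4)
+ #     x = torch.randn(2, 10, 32)            # [batch, seq, c_q]
+ #     mask_bias = torch.zeros(2, 1, 1, 10)  # broadcasts to [2, 4, 10, 10]
+ #     out = attn(q_x=x, kv_x=x, biases=[mask_bias])
+ #     out.shape                             # torch.Size([2, 10, 32])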
+
500
+
501
+ class EsmFoldTriangleAttention(nn.Module):
502
+ def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1e9):
503
+ """
504
+ Args:
505
+ c_in:
506
+ Input channel dimension
507
+ c_hidden:
508
+ Overall hidden channel dimension (not per-head)
509
+ no_heads:
510
+ Number of attention heads
511
+ """
512
+ super().__init__()
513
+
514
+ self.c_in = c_in
515
+ self.c_hidden = c_hidden
516
+ self.no_heads = no_heads
517
+ self.starting = starting
518
+ self.inf = inf
519
+
520
+ self.layer_norm = LayerNorm(self.c_in)
521
+
522
+ self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init="normal")
523
+
524
+ self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads)
525
+
526
+ @torch.jit.ignore
527
+ def _chunk(
528
+ self,
529
+ x: torch.Tensor,
530
+ biases: List[torch.Tensor],
531
+ chunk_size: int,
532
+ use_memory_efficient_kernel: bool = False,
533
+ use_lma: bool = False,
534
+ inplace_safe: bool = False,
535
+ ) -> torch.Tensor:
536
+ "triangle! triangle!"
537
+ mha_inputs = {
538
+ "q_x": x,
539
+ "kv_x": x,
540
+ "biases": biases,
541
+ }
542
+
543
+ return chunk_layer(
544
+ partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma),
545
+ mha_inputs,
546
+ chunk_size=chunk_size,
547
+ no_batch_dims=len(x.shape[:-2]),
548
+ _out=x if inplace_safe else None,
549
+ )
550
+
551
+ def forward(
552
+ self,
553
+ x: torch.Tensor,
554
+ mask: Optional[torch.Tensor] = None,
555
+ chunk_size: Optional[int] = None,
556
+ use_memory_efficient_kernel: bool = False,
557
+ use_lma: bool = False,
558
+ inplace_safe: bool = False,
559
+ ) -> torch.Tensor:
560
+ """
561
+ Args:
562
+ x:
563
+ [*, I, J, C_in] input tensor (e.g. the pair representation)
564
+ Returns:
565
+ [*, I, J, C_in] output tensor
566
+ """
567
+ if mask is None:
568
+ # [*, I, J]
569
+ mask = x.new_ones(
570
+ x.shape[:-1],
571
+ )
572
+
573
+ if not self.starting:
574
+ x = x.transpose(-2, -3)
575
+ mask = mask.transpose(-1, -2)
576
+
577
+ # [*, I, J, C_in]
578
+ x = self.layer_norm(x)
579
+
580
+ # [*, I, 1, 1, J]
581
+ mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]
582
+
583
+ # [*, H, I, J]
584
+ triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))
585
+
586
+ # [*, 1, H, I, J]
587
+ triangle_bias = triangle_bias.unsqueeze(-4)
588
+
589
+ biases = [mask_bias, triangle_bias]
590
+
591
+ if chunk_size is not None:
592
+ x = self._chunk(
593
+ x,
594
+ biases,
595
+ chunk_size,
596
+ use_memory_efficient_kernel=use_memory_efficient_kernel,
597
+ use_lma=use_lma,
598
+ inplace_safe=inplace_safe,
599
+ )
600
+ else:
601
+ x = self.mha(
602
+ q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma
603
+ )
604
+
605
+ if not self.starting:
606
+ x = x.transpose(-2, -3)
607
+
608
+ return x
609
+
610
+
611
+ class EsmFoldTriangleMultiplicativeUpdate(nn.Module):
612
+ """
613
+ Implements Algorithms 11 and 12.
614
+ """
615
+
616
+ def __init__(self, config, _outgoing=True):
617
+ super().__init__()
618
+ c_hidden = config.pairwise_state_dim
619
+ self._outgoing = _outgoing
620
+
621
+ self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden)
622
+ self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
623
+ self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden)
624
+ self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
625
+ self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
626
+ self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init="final")
627
+
628
+ self.layer_norm_in = LayerNorm(c_hidden)
629
+ self.layer_norm_out = LayerNorm(c_hidden)
630
+
631
+ self.sigmoid = nn.Sigmoid()
632
+
633
+ def _combine_projections(
634
+ self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int] = None
635
+ ) -> torch.Tensor:
636
+ if self._outgoing:
637
+ a = permute_final_dims(a, (2, 0, 1))
638
+ b = permute_final_dims(b, (2, 1, 0))
639
+ else:
640
+ a = permute_final_dims(a, (2, 1, 0))
641
+ b = permute_final_dims(b, (2, 0, 1))
642
+
643
+ if _inplace_chunk_size is not None:
644
+ # To be replaced by torch vmap
645
+ for i in range(0, a.shape[-3], _inplace_chunk_size):
646
+ a_chunk = a[..., i : i + _inplace_chunk_size, :, :]
647
+ b_chunk = b[..., i : i + _inplace_chunk_size, :, :]
648
+ a[..., i : i + _inplace_chunk_size, :, :] = torch.matmul(
649
+ a_chunk,
650
+ b_chunk,
651
+ )
652
+
653
+ p = a
654
+ else:
655
+ p = torch.matmul(a, b)
656
+
657
+ return permute_final_dims(p, (1, 2, 0))
658
+
659
+ def _inference_forward(
660
+ self,
661
+ z: torch.Tensor,
662
+ mask: Optional[torch.Tensor] = None,
663
+ inplace_chunk_size: Optional[int] = None,
664
+ with_add: bool = True,
665
+ ):
666
+ """
667
+ Args:
668
+ z:
669
+ A [*, N, N, C_z] pair representation
670
+ mask:
671
+ A [*, N, N] pair mask
672
+ inplace_chunk_size:
673
+ Size of chunks used in the main computation. Increase to trade memory for speed.
674
+ with_add:
675
+ If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).
676
+ Returns:
677
+ A reference to the overwritten z
678
+
679
+ More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the
680
+ addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten
681
+ values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.
682
+ Useful for inference on extremely long sequences.
683
+
684
+ It works as follows. We will make reference to variables used in the default forward implementation below.
685
+ Naively, the triangular multiplicative update requires the manifestation of 5 tensors the size of z: 1) z, the
+ "square" input tensor, 2) a, the first projection of z, 3) b, the second projection of z, 4) g, a z-sized mask,
687
+ and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for
688
+ N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate
689
+ tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the
690
+ tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over
691
+ pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains
692
+ inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring
693
+ total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks
694
+ directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at
695
+ the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column
696
+ ahead of previously overwritten columns and can be recovered directly from z. After the first iteration,
697
+ however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,
698
+ a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For
699
+ 0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith
700
+ iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead.
701
+ Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the
702
+ z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.
703
+ After the final iteration, z has been completely overwritten and contains the triangular multiplicative update.
704
+ If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case,
705
+ peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small
706
+ variables.
707
+ """
708
+ if mask is None:
709
+ mask = z.new_ones(z.shape[:-1])
710
+
711
+ mask = mask.unsqueeze(-1)
712
+
713
+ def compute_projection_helper(pair, mask, a=True):
714
+ if a:
715
+ linear_g = self.linear_a_g
716
+ linear_p = self.linear_a_p
717
+ else:
718
+ linear_g = self.linear_b_g
719
+ linear_p = self.linear_b_p
720
+
721
+ pair = self.layer_norm_in(pair)
722
+ p = linear_g(pair)
723
+ p.sigmoid_()
724
+ p *= linear_p(pair)
725
+ p *= mask
726
+ p = permute_final_dims(p, (2, 0, 1))
727
+ return p
728
+
729
+ def compute_projection(pair, mask, a=True, chunked=True):
730
+ need_transpose = self._outgoing ^ a
731
+ if not chunked:
732
+ p = compute_projection_helper(pair, mask, a)
733
+ if need_transpose:
734
+ p = p.transpose(-1, -2)
735
+ else:
736
+ # This computation is chunked so as not to exceed our 2.5x
737
+ # budget with a large intermediate tensor
738
+ linear_g = self.linear_a_g if a else self.linear_b_g
739
+ c = linear_g.bias.shape[-1]
740
+ out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1]
741
+ p = pair.new_zeros(out_shape)
742
+ for i in range(0, pair.shape[-3], inplace_chunk_size):
743
+ pair_chunk = pair[..., i : i + inplace_chunk_size, :, :]
744
+ pair_chunk = compute_projection_helper(
745
+ pair[..., i : i + inplace_chunk_size, :, :],
746
+ mask[..., i : i + inplace_chunk_size, :, :],
747
+ a,
748
+ )
749
+ if need_transpose:
750
+ pair_chunk = pair_chunk.transpose(-1, -2)
751
+ p[..., i : i + inplace_chunk_size] = pair_chunk
752
+ else:
753
+ p[..., i : i + inplace_chunk_size, :] = pair_chunk
754
+
755
+ del pair_chunk
756
+
757
+ return p
758
+
759
+ # We start by fully manifesting a. In addition to the input, this
760
+ # brings total memory consumption to 2x z (disregarding size of chunks)
761
+ # [*, N, N, c]
762
+ a = compute_projection(z, mask, True, chunked=True)
763
+
764
+ if inplace_chunk_size is not None:
765
+ n = a.shape[-1]
766
+ half_n = n // 2 + n % 2
767
+ row_dim = -3
768
+ col_dim = -2
769
+ b_chunk_dim = row_dim if self._outgoing else col_dim
770
+
771
+ def empty_slicer(t):
772
+ return [slice(None) for _ in t.shape]
773
+
774
+ def slice_tensor(t, start, end, dim):
775
+ # Slices start:end from the dim dimension of t
776
+ s = empty_slicer(t)
777
+ s[dim] = slice(start, end)
778
+ return t[s]
779
+
780
+ def flip_z_cache_(z_cache, z):
781
+ # "Reorient" the z_cache (see below), filling it with quadrants
782
+ # 3---recovered from the z_cache---and 4---recovered from z---
783
+ # of the input tensor z.
784
+ quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim)
785
+ z_cache = z_cache.transpose(row_dim, col_dim)
786
+
787
+ # If n is odd, we need to shrink the z_cache by one row
788
+ z_cache = z_cache[..., : (n // 2), :, :]
789
+
790
+ # Move the 3rd quadrant of z into the first half of the rotated z-cache
791
+ first_half_slicer = empty_slicer(z_cache)
792
+ first_half_slicer[col_dim] = slice(0, half_n)
793
+ z_cache[first_half_slicer] = quadrant_3
794
+
795
+ # Get the fourth quadrant of z
796
+ quadrant_4 = slice_tensor(z, half_n, None, row_dim)
797
+ quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim)
798
+
799
+ # Insert said quadrant into the rotated z-cache
800
+ quadrant_3_slicer = empty_slicer(z_cache)
801
+ quadrant_3_slicer[col_dim] = slice(half_n, None)
802
+
803
+ z_cache[quadrant_3_slicer] = quadrant_4
804
+
805
+ return z_cache
806
+
807
+ # Initialize the z cache to the left half of z.
808
+ z_cache_shape = list(z.shape)
809
+ z_cache_shape[col_dim] = half_n
810
+ z_cache = z.new_zeros(z_cache_shape)
811
+ z_cache_slicer = empty_slicer(z_cache)
812
+ z_cache_slicer[col_dim] = slice(0, half_n)
813
+ z_cache.copy_(z[z_cache_slicer])
814
+ z_cache_rotated = False
815
+
816
+ # We need to reorient the z-cache at the halfway point, and we
817
+ # don't want a single chunk to straddle that point. We contract one
818
+ # of the chunks in the middle to address that problem.
819
+ i_range = list(range(0, half_n, inplace_chunk_size))
820
+ initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])]
821
+ after_half = list(range(half_n, n, inplace_chunk_size))
822
+ after_half_offsets = [inplace_chunk_size for _ in after_half]
823
+ combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets)
824
+ for i, offset in combined_range_with_offsets:
825
+ if not z_cache_rotated and i >= half_n:
826
+ z_cache = flip_z_cache_(z_cache, z)
827
+ z_cache_rotated = True
828
+
829
+ z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim)
830
+ mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim)
831
+
832
+ z_chunk_b = z_chunk_b.clone()
833
+ if b_chunk_dim == col_dim:
834
+ z_chunk_b = slice_tensor(z, i, i + offset, col_dim)
835
+ else: # b_chunk_dim == row_dim
836
+ # In this case, the b-dimension (b_chunk_dim) is partially
837
+ # overwritten at the end of each iteration. We need to
838
+ # restore the missing component from the z-cache.
839
+ if not z_cache_rotated:
840
+ z_chunk_slicer = empty_slicer(z_chunk_b)
841
+ z_chunk_slicer[col_dim] = slice(0, half_n)
842
+ z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim)
843
+ else:
844
+ z_cache_offset = i - half_n
845
+ z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim)
846
+
847
+ b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False)
848
+ del z_chunk_b
849
+
850
+ x_chunk = torch.matmul(a, b_chunk)
851
+ x_chunk = permute_final_dims(x_chunk, (1, 2, 0))
852
+ x_chunk = self.layer_norm_out(x_chunk)
853
+ x_chunk = self.linear_z(x_chunk)
854
+
855
+ # The g dimension (col_dim) is parallel to and ahead of the
856
+ # overwrites in z. We can extract the g chunk normally.
857
+ z_chunk_g = slice_tensor(z, i, i + offset, col_dim)
858
+ g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g))
859
+ g_chunk.sigmoid_()
860
+ del z_chunk_g
861
+
862
+ x_chunk *= g_chunk
863
+
864
+ # Write the columns into z in-place
865
+ z_slicer = empty_slicer(z)
866
+ z_slicer[col_dim] = slice(i, i + offset)
867
+ if with_add:
868
+ z[z_slicer] += x_chunk
869
+ else:
870
+ z[z_slicer] = x_chunk
871
+ else:
872
+ b = compute_projection(z, mask, False, False)
873
+ x = torch.matmul(a, b)
874
+ x = self.layer_norm_out(x)
875
+ x = self.linear_z(x)
876
+ g = self.linear_g(z)
877
+ g.sigmoid_()
878
+ x *= g
879
+ if with_add:
880
+ z += x
881
+ else:
882
+ z = x
883
+
884
+ return z
885
+
886
+ def forward(
887
+ self,
888
+ z: torch.Tensor,
889
+ mask: Optional[torch.Tensor] = None,
890
+ inplace_safe: bool = False,
891
+ _add_with_inplace: bool = False,
892
+ _inplace_chunk_size: Optional[int] = 256,
893
+ ) -> torch.Tensor:
894
+ """
895
+ Args:
896
+ z:
897
+ [*, N_res, N_res, C_z] input tensor
898
+ mask:
899
+ [*, N_res, N_res] input mask
900
+ Returns:
901
+ [*, N_res, N_res, C_z] output tensor
902
+ """
903
+ if inplace_safe:
904
+ x = self._inference_forward(
905
+ z,
906
+ mask,
907
+ inplace_chunk_size=_inplace_chunk_size,
908
+ with_add=_add_with_inplace,
909
+ )
910
+ return x
911
+
912
+ if mask is None:
913
+ mask = z.new_ones(z.shape[:-1])
914
+
915
+ mask = mask.unsqueeze(-1)
916
+
917
+ z = self.layer_norm_in(z)
918
+ a = mask
919
+ a = a * self.sigmoid(self.linear_a_g(z))
920
+ a = a * self.linear_a_p(z)
921
+ b = mask
922
+ b = b * self.sigmoid(self.linear_b_g(z))
923
+ b = b * self.linear_b_p(z)
924
+
925
+ if is_fp16_enabled():
926
+ with torch.cuda.amp.autocast(enabled=False):
927
+ x = self._combine_projections(a.float(), b.float())
928
+ else:
929
+ x = self._combine_projections(a, b)
930
+
931
+ del a, b
932
+ x = self.layer_norm_out(x)
933
+ x = self.linear_z(x)
934
+ g = self.sigmoid(self.linear_g(z))
935
+ x = x * g
936
+
937
+ return x
938
+
939
+
940
+ class EsmFoldPreTrainedModel(EsmPreTrainedModel):
941
+ """
942
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
943
+ models.
944
+ """
945
+
946
+ # Subclass `EsmPreTrainedModel` to deal with special init
947
+ def _init_weights(self, module):
948
+ """Initialize the weights"""
949
+ if isinstance(module, EsmFoldLinear):
950
+ with torch.no_grad():
951
+ if module.init_fn is not None:
952
+ module.init_fn(module.weight, module.bias)
953
+ elif module.init == "default":
954
+ trunc_normal_init_(module.weight, scale=1.0)
955
+ elif module.init == "relu":
956
+ trunc_normal_init_(module.weight, scale=2.0)
957
+ elif module.init == "glorot":
958
+ nn.init.xavier_uniform_(module.weight, gain=1)
959
+ elif module.init == "gating":
960
+ module.weight.fill_(0.0)
961
+ if module.bias:
962
+ module.bias.fill_(1.0)
963
+ elif module.init == "normal":
964
+ torch.nn.init.kaiming_normal_(module.weight, nonlinearity="linear")
965
+ elif module.init == "final":
966
+ module.weight.fill_(0.0)
967
+ elif isinstance(module, EsmFoldInvariantPointAttention):
968
+ ipa_point_weights_init_(module.head_weights)
969
+ elif isinstance(module, EsmFoldTriangularSelfAttentionBlock):
970
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight)
971
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias)
972
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight)
973
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias)
974
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight)
975
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias)
976
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight)
977
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias)
978
+
979
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight)
980
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias)
981
+ torch.nn.init.zeros_(module.pair_to_sequence.linear.weight)
982
+ torch.nn.init.zeros_(module.seq_attention.o_proj.weight)
983
+ torch.nn.init.zeros_(module.seq_attention.o_proj.bias)
984
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight)
985
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias)
986
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight)
987
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias)
988
+ else:
989
+ super()._init_weights(module)
990
+
991
+
992
+ class EsmFoldSelfAttention(nn.Module):
993
+ def __init__(self, embed_dim, num_heads, head_width, gated=False):
994
+ super().__init__()
995
+ assert embed_dim == num_heads * head_width
996
+
997
+ self.embed_dim = embed_dim
998
+ self.num_heads = num_heads
999
+ self.head_width = head_width
1000
+
1001
+ self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False)
1002
+ self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True)
1003
+ self.gated = gated
1004
+ if gated:
1005
+ self.g_proj = nn.Linear(embed_dim, embed_dim)
1006
+ torch.nn.init.zeros_(self.g_proj.weight)
1007
+ torch.nn.init.ones_(self.g_proj.bias)
1008
+
1009
+ self.rescale_factor = self.head_width**-0.5
1010
+
1011
+ torch.nn.init.zeros_(self.o_proj.bias)
1012
+
1013
+ def forward(self, x, mask=None, bias=None, indices=None):
1014
+ """
1015
+ Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths,
1016
+ use mask.
1017
+
1018
+ Inputs:
1019
+ x: batch of input sequences (.. x L x C)
+ mask: batch of boolean masks where 1=valid, 0=padding position (.. x L_k)
+ bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads)
1021
+
1022
+ Outputs:
1023
+ sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
1024
+ """
1025
+
1026
+ t = self.proj(x).view(*x.shape[:2], self.num_heads, -1)
1027
+ t = t.permute(0, 2, 1, 3)
1028
+ q, k, v = t.chunk(3, dim=-1)
1029
+
1030
+ q = self.rescale_factor * q
1031
+ a = torch.einsum("...qc,...kc->...qk", q, k)
1032
+
1033
+ # Add external attention bias.
1034
+ if bias is not None:
1035
+ a = a + bias.permute(0, 3, 1, 2)
1036
+
1037
+ # Do not attend to padding tokens.
1038
+ if mask is not None:
1039
+ mask = mask[:, None, None]
1040
+ a = a.masked_fill(mask == False, -np.inf) # noqa: E712
1041
+
1042
+ a = nn.functional.softmax(a, dim=-1)
1043
+
1044
+ y = torch.einsum("...hqk,...hkc->...qhc", a, v)
1045
+ y = y.reshape(*y.shape[:2], -1)
1046
+
1047
+ if self.gated:
1048
+ y = self.g_proj(x).sigmoid() * y
1049
+ y = self.o_proj(y)
1050
+
1051
+ return y, a.permute(0, 3, 1, 2)
1052
+
1053
+
1054
+ class EsmFoldDropout(nn.Module):
1055
+ """
1056
+ Implementation of dropout with the ability to share the dropout mask along a particular dimension.
1057
+ """
1058
+
1059
+ def __init__(self, r: float, batch_dim: Union[int, List[int]]):
1060
+ super().__init__()
1061
+
1062
+ self.r = r
1063
+ if isinstance(batch_dim, int):
1064
+ batch_dim = [batch_dim]
1065
+ self.batch_dim = batch_dim
1066
+ self.dropout = nn.Dropout(self.r)
1067
+
1068
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1069
+ shape = list(x.shape)
1070
+ if self.batch_dim is not None:
1071
+ for bd in self.batch_dim:
1072
+ shape[bd] = 1
1073
+ return x * self.dropout(x.new_ones(shape))
1074
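+ # Illustration of the shared dropout mask (editorial; shapes are arbitrary): with
+ # batch_dim=1 the mask has a singleton at dim 1, so the same keep/zero decision is broadcast
+ # along that dimension. The row_drop / col_drop modules of EsmFoldTriangularSelfAttentionBlock
+ # below are built exactly this way.
+ #
+ #     drop = EsmFoldDropout(r=0.5, batch_dim=1)
+ #     x = torch.randn(2, 8, 8, 16)
+ #     y = drop(x)  # in training mode, the dropout pattern is identical across dim 1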
+
1075
+
1076
+ class EsmFoldSequenceToPair(nn.Module):
1077
+ def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
1078
+ super().__init__()
1079
+
1080
+ self.layernorm = nn.LayerNorm(sequence_state_dim)
1081
+ self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
1082
+ self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)
1083
+
1084
+ torch.nn.init.zeros_(self.proj.bias)
1085
+ torch.nn.init.zeros_(self.o_proj.bias)
1086
+
1087
+ def forward(self, sequence_state):
1088
+ """
1089
+ Inputs:
1090
+ sequence_state: B x L x sequence_state_dim
1091
+
1092
+ Output:
1093
+ pairwise_state: B x L x L x pairwise_state_dim
1094
+
1095
+ Intermediate state:
1096
+ B x L x L x 2*inner_dim
1097
+ """
1098
+
1099
+ assert len(sequence_state.shape) == 3
1100
+
1101
+ s = self.layernorm(sequence_state)
1102
+ s = self.proj(s)
1103
+ q, k = s.chunk(2, dim=-1)
1104
+
1105
+ prod = q[:, None, :, :] * k[:, :, None, :]
1106
+ diff = q[:, None, :, :] - k[:, :, None, :]
1107
+
1108
+ x = torch.cat([prod, diff], dim=-1)
1109
+ x = self.o_proj(x)
1110
+
1111
+ return x
1112
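+ # Shape sketch for EsmFoldSequenceToPair (editorial; sizes are arbitrary assumptions):
+ #
+ #     seq2pair = EsmFoldSequenceToPair(sequence_state_dim=64, inner_dim=32, pairwise_state_dim=128)
+ #     s = torch.randn(2, 10, 64)  # B x L x sequence_state_dim
+ #     z = seq2pair(s)             # (2, 10, 10, 128) == B x L x L x pairwise_state_dim
+ #
+ # Each (i, j) entry concatenates the elementwise product and difference of the two linear
+ # projections of the sequence state, giving 2 * inner_dim channels before the output projection.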
+
1113
+
1114
+ class EsmFoldPairToSequence(nn.Module):
1115
+ def __init__(self, pairwise_state_dim, num_heads):
1116
+ super().__init__()
1117
+
1118
+ self.layernorm = nn.LayerNorm(pairwise_state_dim)
1119
+ self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False)
1120
+
1121
+ def forward(self, pairwise_state):
1122
+ """
1123
+ Inputs:
1124
+ pairwise_state: B x L x L x pairwise_state_dim
1125
+
1126
+ Output:
1127
+ pairwise_bias: B x L x L x num_heads
1128
+ """
1129
+ assert len(pairwise_state.shape) == 4
1130
+ z = self.layernorm(pairwise_state)
1131
+ pairwise_bias = self.linear(z)
1132
+ return pairwise_bias
1133
+
1134
+
1135
+ class EsmFoldResidueMLP(nn.Module):
1136
+ def __init__(self, embed_dim, inner_dim, dropout=0):
1137
+ super().__init__()
1138
+
1139
+ self.mlp = nn.Sequential(
1140
+ nn.LayerNorm(embed_dim),
1141
+ nn.Linear(embed_dim, inner_dim),
1142
+ nn.ReLU(),
1143
+ nn.Linear(inner_dim, embed_dim),
1144
+ nn.Dropout(dropout),
1145
+ )
1146
+
1147
+ def forward(self, x):
1148
+ return x + self.mlp(x)
1149
+
1150
+
1151
+ class EsmFoldTriangularSelfAttentionBlock(nn.Module):
1152
+ def __init__(self, config):
1153
+ super().__init__()
1154
+ self.config = config
1155
+
1156
+ sequence_state_dim = config.sequence_state_dim
1157
+ pairwise_state_dim = config.pairwise_state_dim
1158
+ sequence_num_heads = sequence_state_dim // config.sequence_head_width
1159
+ pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width
1160
+
1161
+ self.layernorm_1 = nn.LayerNorm(sequence_state_dim)
1162
+
1163
+ self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim)
1164
+ self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads)
1165
+
1166
+ self.seq_attention = EsmFoldSelfAttention(
1167
+ sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True
1168
+ )
1169
+ self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True)
1170
+ self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False)
1171
+
1172
+ self.tri_att_start = EsmFoldTriangleAttention(
1173
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=True
1174
+ )
1175
+ self.tri_att_end = EsmFoldTriangleAttention(
1176
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=False
1177
+ )
1178
+
1179
+ self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout)
1180
+ self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout)
1181
+
1182
+ self.drop = nn.Dropout(config.dropout)
1183
+ self.row_drop = EsmFoldDropout(config.dropout * 2, 2)
1184
+ self.col_drop = EsmFoldDropout(config.dropout * 2, 1)
1185
+
1186
+ def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
1187
+ """
1188
+ Inputs:
1189
+ sequence_state: B x L x sequence_state_dim
+ pairwise_state: B x L x L x pairwise_state_dim
+ mask: B x L boolean tensor of valid positions
1191
+
1192
+ Output:
1193
+ sequence_state: B x L x sequence_state_dim
+ pairwise_state: B x L x L x pairwise_state_dim
1194
+ """
1195
+ if len(sequence_state.shape) != 3:
1196
+ raise ValueError(f"`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.")
1197
+ if len(pairwise_state.shape) != 4:
1198
+ raise ValueError(f"`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.")
1199
+ if mask is not None and len(mask.shape) != 2:
1200
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
1201
+
1202
+ batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
1203
+ pairwise_state_dim = pairwise_state.shape[3]
1204
+
1205
+ if sequence_state_dim != self.config.sequence_state_dim:
1206
+ raise ValueError(
1207
+ "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got "
1208
+ f"{sequence_state_dim} != {self.config.sequence_state_dim}."
1209
+ )
1210
+ if pairwise_state_dim != self.config.pairwise_state_dim:
1211
+ raise ValueError(
1212
+ "`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got "
1213
+ f"{pairwise_state_dim} != {self.config.pairwise_state_dim}."
1214
+ )
1215
+ if batch_dim != pairwise_state.shape[0]:
1216
+ raise ValueError(
1217
+ f"`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != "
1218
+ f"{pairwise_state.shape[0]}."
1219
+ )
1220
+ if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]:
1221
+ raise ValueError(
1222
+ f"`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != "
1223
+ f"{pairwise_state.shape[1]} or {pairwise_state.shape[2]}."
1224
+ )
1225
+
1226
+ # Update sequence state
1227
+ bias = self.pair_to_sequence(pairwise_state)
1228
+
1229
+ # Self attention with bias + mlp.
1230
+ y = self.layernorm_1(sequence_state)
1231
+ y, _ = self.seq_attention(y, mask=mask, bias=bias)
1232
+ sequence_state = sequence_state + self.drop(y)
1233
+ sequence_state = self.mlp_seq(sequence_state)
1234
+
1235
+ # Update pairwise state
1236
+ pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)
1237
+
1238
+ # Axial attention with triangular bias.
1239
+ tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
1240
+ pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask))
1241
+ pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask))
1242
+ pairwise_state = pairwise_state + self.row_drop(
1243
+ self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
1244
+ )
1245
+ pairwise_state = pairwise_state + self.col_drop(
1246
+ self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
1247
+ )
1248
+
1249
+ # MLP over pairs.
1250
+ pairwise_state = self.mlp_pair(pairwise_state)
1251
+
1252
+ return sequence_state, pairwise_state
1253
+
1254
+
1255
+ class EsmCategoricalMixture:
1256
+ def __init__(self, param, bins=50, start=0, end=1):
1257
+ # All tensors are of shape ..., bins.
1258
+ self.logits = param
1259
+ bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype)
1260
+ self.v_bins = (bins[:-1] + bins[1:]) / 2
1261
+
1262
+ def log_prob(self, true):
1263
+ # Shapes are:
1264
+ # self.probs: ... x bins
1265
+ # true : ...
1266
+ true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1)
1267
+ nll = self.logits.log_softmax(-1)
1268
+ return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1)
1269
+
1270
+ def mean(self):
1271
+ return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
1272
+
1273
+
1274
+ def categorical_lddt(logits, bins=50):
1275
+ # Logits are ..., 37, bins.
1276
+ return EsmCategoricalMixture(logits, bins=bins).mean()
1277
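+ # How the mixture turns bin logits into a scalar (editorial example): with bins=50 on [0, 1],
+ # v_bins holds the 50 bin midpoints (0.01, 0.03, ..., 0.99) and mean() is softmax(logits) @ v_bins,
+ # i.e. the expected bin midpoint.
+ #
+ #     logits = torch.zeros(1, 37, 50)    # uniform over bins
+ #     categorical_lddt(logits, bins=50)  # ~0.5 everywhere, shape [1, 37]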
+
1278
+
1279
+ def get_axial_mask(mask):
1280
+ """
1281
+ Helper to convert B x L mask of valid positions to axial mask used in row column attentions.
1282
+
1283
+ Input:
1284
+ mask: B x L tensor of booleans
1285
+
1286
+ Output:
1287
+ mask: B x L x L tensor of booleans
1288
+ """
1289
+
1290
+ if mask is None:
1291
+ return None
1292
+
1293
+ if len(mask.shape) != 2:
1294
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
1295
+ batch_dim, seq_dim = mask.shape
1296
+ m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim)
1297
+ m = m.reshape(batch_dim * seq_dim, seq_dim)
1298
+ return m
1299
+
1300
+
1301
+ class EsmFoldRelativePosition(nn.Module):
1302
+ def __init__(self, config):
1303
+ super().__init__()
1304
+ self.bins = config.position_bins
1305
+
1306
+ # Note an additional offset is used so that the 0th position
1307
+ # is reserved for masked pairs.
1308
+ self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim)
1309
+
1310
+ def forward(self, residue_index, mask=None):
1311
+ """
1312
+ Input:
1313
+ residue_index: B x L tensor of indices (dtype=torch.long)
+ mask: B x L tensor of booleans
1314
+
1315
+ Output:
1316
+ pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
1317
+ """
1318
+ if residue_index.dtype != torch.long:
1319
+ raise ValueError(f"`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.")
1320
+ if mask is not None and residue_index.shape != mask.shape:
1321
+ raise ValueError(
1322
+ f"`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}."
1323
+ )
1324
+
1325
+ diff = residue_index[:, None, :] - residue_index[:, :, None]
1326
+ diff = diff.clamp(-self.bins, self.bins)
1327
+ diff = diff + self.bins + 1 # Add 1 to adjust for padding index.
1328
+
1329
+ if mask is not None:
1330
+ mask = mask[:, None, :] * mask[:, :, None]
1331
+ diff[mask == False] = 0 # noqa: E712
1332
+
1333
+ output = self.embedding(diff)
1334
+ return output
1335
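+ # Worked example of the relative-position binning above (editorial, assuming
+ # config.position_bins = 32): a pair 100 residues apart gives diff = 100, clamped to 32 and
+ # shifted to index 32 + 32 + 1 = 65; a pair -5 apart maps to -5 + 32 + 1 = 28; masked pairs are
+ # forced to index 0, which is why the embedding table has 2 * bins + 2 entries.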
+
1336
+
1337
+ class EsmFoldAngleResnetBlock(nn.Module):
1338
+ def __init__(self, config):
1339
+ super().__init__()
1340
+
1341
+ self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="relu")
1342
+ self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="final")
1343
+
1344
+ self.relu = nn.ReLU()
1345
+
1346
+ def forward(self, a: torch.Tensor) -> torch.Tensor:
1347
+ s_initial = a
1348
+
1349
+ a = self.relu(a)
1350
+ a = self.linear_1(a)
1351
+ a = self.relu(a)
1352
+ a = self.linear_2(a)
1353
+
1354
+ return a + s_initial
1355
+
1356
+
1357
+ class EsmFoldAngleResnet(nn.Module):
1358
+ """
1359
+ Implements Algorithm 20, lines 11-14
1360
+ """
1361
+
1362
+ def __init__(self, config):
1363
+ super().__init__()
1364
+ self.config = config
1365
+
1366
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
1367
+ self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
1368
+
1369
+ self.layers = nn.ModuleList()
1370
+ for _ in range(config.num_resnet_blocks):
1371
+ layer = EsmFoldAngleResnetBlock(config)
1372
+ self.layers.append(layer)
1373
+
1374
+ self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2)
1375
+
1376
+ self.relu = nn.ReLU()
1377
+
1378
+ def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
1379
+ """
1380
+ Args:
1381
+ s:
1382
+ [*, C_hidden] single embedding
1383
+ s_initial:
1384
+ [*, C_hidden] single embedding as of the start of the StructureModule
1385
+ Returns:
1386
+ [*, no_angles, 2] predicted angles
1387
+ """
1388
+ # NOTE: The ReLUs applied to the inputs are absent from the supplement
1389
+ # pseudocode but present in the source. For maximal compatibility with
1390
+ # the pretrained weights, I'm going with the source.
1391
+
1392
+ # [*, C_hidden]
1393
+ s_initial = self.relu(s_initial)
1394
+ s_initial = self.linear_initial(s_initial)
1395
+ s = self.relu(s)
1396
+ s = self.linear_in(s)
1397
+ s = s + s_initial
1398
+
1399
+ for l in self.layers:
1400
+ s = l(s)
1401
+
1402
+ s = self.relu(s)
1403
+
1404
+ # [*, no_angles * 2]
1405
+ s = self.linear_out(s)
1406
+
1407
+ # [*, no_angles, 2]
1408
+ s = s.view(s.shape[:-1] + (-1, 2))
1409
+
1410
+ unnormalized_s = s
1411
+ norm_denom = torch.sqrt(
1412
+ torch.clamp(
1413
+ torch.sum(s**2, dim=-1, keepdim=True),
1414
+ min=self.config.epsilon,
1415
+ )
1416
+ )
1417
+ s = s / norm_denom
1418
+
1419
+ return unnormalized_s, s
1420
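+ # Editorial note: each of the `num_angles` torsions is predicted as a 2-D vector that is
+ # L2-normalized onto the unit circle. Assuming the (sin, cos) ordering used by
+ # torsion_angles_to_frames later in this file, the angle itself could be recovered as:
+ #
+ #     unnormalized, normalized = angle_resnet(s, s_initial)  # angle_resnet: an instance of this module
+ #     torsions = torch.atan2(normalized[..., 0], normalized[..., 1])  # [*, num_angles], in radians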
+
1421
+
1422
+ class EsmFoldInvariantPointAttention(nn.Module):
1423
+ """
1424
+ Implements Algorithm 22.
1425
+ """
1426
+
1427
+ def __init__(self, config):
1428
+ super().__init__()
1429
+ self.config = config
1430
+
1431
+ c_s = config.sequence_dim
1432
+ c_z = config.pairwise_dim
1433
+ self.hidden_dim = config.ipa_dim
1434
+ self.num_heads = config.num_heads_ipa
1435
+ self.num_qk_points = config.num_qk_points
1436
+ self.num_v_points = config.num_v_points
1437
+
1438
+ # These linear layers differ from their specifications in the
1439
+ # supplement. There, they lack bias and use Glorot initialization.
1440
+ # Here as in the official source, they have bias and use the default
1441
+ # Lecun initialization.
1442
+ hc = config.ipa_dim * config.num_heads_ipa
1443
+ self.linear_q = EsmFoldLinear(c_s, hc)
1444
+ self.linear_kv = EsmFoldLinear(c_s, 2 * hc)
1445
+
1446
+ hpq = config.num_heads_ipa * config.num_qk_points * 3
1447
+ self.linear_q_points = EsmFoldLinear(c_s, hpq)
1448
+
1449
+ hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3
1450
+ self.linear_kv_points = EsmFoldLinear(c_s, hpkv)
1451
+
1452
+ self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa)
1453
+
1454
+ self.head_weights = nn.Parameter(torch.zeros((config.num_heads_ipa)))
1455
+
1456
+ concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4)
1457
+ self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init="final")
1458
+
1459
+ self.softmax = nn.Softmax(dim=-1)
1460
+ self.softplus = nn.Softplus()
1461
+
1462
+ def forward(
1463
+ self,
1464
+ s: torch.Tensor,
1465
+ z: Optional[torch.Tensor],
1466
+ r: Rigid,
1467
+ mask: torch.Tensor,
1468
+ _offload_inference: bool = False,
1469
+ _z_reference_list: Optional[Sequence[torch.Tensor]] = None,
1470
+ ) -> torch.Tensor:
1471
+ """
1472
+ Args:
1473
+ s:
1474
+ [*, N_res, C_s] single representation
1475
+ z:
1476
+ [*, N_res, N_res, C_z] pair representation
1477
+ r:
1478
+ [*, N_res] transformation object
1479
+ mask:
1480
+ [*, N_res] mask
1481
+ Returns:
1482
+ [*, N_res, C_s] single representation update
1483
+ """
1484
+ z = [z]
1485
+
1486
+ #######################################
1487
+ # Generate scalar and point activations
1488
+ #######################################
1489
+ # [*, N_res, H * C_hidden]
1490
+ q = self.linear_q(s)
1491
+ kv = self.linear_kv(s)
1492
+
1493
+ # [*, N_res, H, C_hidden]
1494
+ q = q.view(q.shape[:-1] + (self.num_heads, -1))
1495
+
1496
+ # [*, N_res, H, 2 * C_hidden]
1497
+ kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))
1498
+
1499
+ # [*, N_res, H, C_hidden]
1500
+ k, v = torch.split(kv, self.hidden_dim, dim=-1)
1501
+
1502
+ # [*, N_res, H * P_q * 3]
1503
+ q_pts = self.linear_q_points(s)
1504
+
1505
+ # This is kind of clunky, but it's how the original does it
1506
+ # [*, N_res, H * P_q, 3]
1507
+ q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
1508
+ q_pts = torch.stack(q_pts, dim=-1)
1509
+ q_pts = r[..., None].apply(q_pts)
1510
+
1511
+ # [*, N_res, H, P_q, 3]
1512
+ q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))
1513
+
1514
+ # [*, N_res, H * (P_q + P_v) * 3]
1515
+ kv_pts = self.linear_kv_points(s)
1516
+
1517
+ # [*, N_res, H * (P_q + P_v), 3]
1518
+ kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
1519
+ kv_pts = torch.stack(kv_pts, dim=-1)
1520
+ kv_pts = r[..., None].apply(kv_pts)
1521
+
1522
+ # [*, N_res, H, (P_q + P_v), 3]
1523
+ kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))
1524
+
1525
+ # [*, N_res, H, P_q/P_v, 3]
1526
+ k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)
1527
+
1528
+ ##########################
1529
+ # Compute attention scores
1530
+ ##########################
1531
+ # [*, N_res, N_res, H]
1532
+ b = self.linear_b(z[0])
1533
+
1534
+ if _offload_inference:
1535
+ assert sys.getrefcount(z[0]) == 2
1536
+ z[0] = z[0].cpu()
1537
+
1538
+ # [*, H, N_res, N_res]
1539
+ if is_fp16_enabled():
1540
+ with torch.cuda.amp.autocast(enabled=False):
1541
+ a = torch.matmul(
1542
+ permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden]
1543
+ permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res]
1544
+ )
1545
+ else:
1546
+ a = torch.matmul(
1547
+ permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]
1548
+ permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res]
1549
+ )
1550
+
1551
+ a *= math.sqrt(1.0 / (3 * self.hidden_dim))
1552
+ a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))
1553
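+ # Editorial note: the 1/3 factors above follow Algorithm 22 of the AlphaFold2 supplement; the
+ # scalar q.k term, the pair-bias term and the point-distance term each receive weight
+ # sqrt(1/3) so the three contributions to the attention logits have comparable scale, and the
+ # point term's w_C = sqrt(2 / (9 * num_qk_points)) is folded into head_weights a few lines below.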
+
1554
+ # [*, N_res, N_res, H, P_q, 3]
1555
+ pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
1556
+ pt_att = pt_att**2
1557
+
1558
+ # [*, N_res, N_res, H, P_q]
1559
+ pt_att = sum(torch.unbind(pt_att, dim=-1))
1560
+ head_weights = self.softplus(self.head_weights).view(*((1,) * len(pt_att.shape[:-2]) + (-1, 1)))
1561
+ head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))
1562
+ pt_att = pt_att * head_weights
1563
+
1564
+ # [*, N_res, N_res, H]
1565
+ pt_att = torch.sum(pt_att, dim=-1) * (-0.5)
1566
+ # [*, N_res, N_res]
1567
+ square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
1568
+ square_mask = self.config.inf * (square_mask - 1)
1569
+
1570
+ # [*, H, N_res, N_res]
1571
+ pt_att = permute_final_dims(pt_att, (2, 0, 1))
1572
+
1573
+ a = a + pt_att
1574
+ a = a + square_mask.unsqueeze(-3)
1575
+ a = self.softmax(a)
1576
+
1577
+ ################
1578
+ # Compute output
1579
+ ################
1580
+ # [*, N_res, H, C_hidden]
1581
+ o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)
1582
+
1583
+ # [*, N_res, H * C_hidden]
1584
+ o = flatten_final_dims(o, 2)
1585
+
1586
+ # [*, H, 3, N_res, P_v]
1587
+ o_pt = torch.sum(
1588
+ (a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]),
1589
+ dim=-2,
1590
+ )
1591
+
1592
+ # [*, N_res, H, P_v, 3]
1593
+ o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
1594
+ o_pt = r[..., None, None].invert_apply(o_pt)
1595
+
1596
+ # [*, N_res, H * P_v]
1597
+ o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt**2, dim=-1) + self.config.epsilon), 2)
1598
+
1599
+ # [*, N_res, H * P_v, 3]
1600
+ o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)
1601
+
1602
+ if _offload_inference:
1603
+ z[0] = z[0].to(o_pt.device)
1604
+
1605
+ # [*, N_res, H, C_z]
1606
+ o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))
1607
+
1608
+ # [*, N_res, H * C_z]
1609
+ o_pair = flatten_final_dims(o_pair, 2)
1610
+
1611
+ # [*, N_res, C_s]
1612
+ s = self.linear_out(
1613
+ torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype)
1614
+ )
1615
+
1616
+ return s
1617
+
1618
+
1619
+ class EsmFoldBackboneUpdate(nn.Module):
1620
+ """
1621
+ Implements part of Algorithm 23.
1622
+ """
1623
+
1624
+ def __init__(self, config):
1625
+ super().__init__()
1626
+
1627
+ self.linear = EsmFoldLinear(config.sequence_dim, 6, init="final")
1628
+
1629
+ def forward(self, s: torch.Tensor) -> torch.Tensor:
1630
+ """
1631
+ Args:
1632
+ s: [*, N_res, C_s] single representation
1633
+ Returns:
1634
+ [*, N_res, 6] update vector
1635
+ """
1636
+ # [*, 6]
1637
+ update = self.linear(s)
1638
+
1639
+ return update
1640
+
1641
+
1642
+ class EsmFoldStructureModuleTransitionLayer(nn.Module):
1643
+ def __init__(self, config):
1644
+ super().__init__()
1645
+
1646
+ self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
1647
+ self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
1648
+ self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="final")
1649
+
1650
+ self.relu = nn.ReLU()
1651
+
1652
+ def forward(self, s):
1653
+ s_initial = s
1654
+ s = self.linear_1(s)
1655
+ s = self.relu(s)
1656
+ s = self.linear_2(s)
1657
+ s = self.relu(s)
1658
+ s = self.linear_3(s)
1659
+
1660
+ s = s + s_initial
1661
+
1662
+ return s
1663
+
1664
+
1665
+ class EsmFoldStructureModuleTransition(nn.Module):
1666
+ def __init__(self, config):
1667
+ super().__init__()
1668
+ self.config = config
1669
+
1670
+ self.layers = nn.ModuleList()
1671
+ for _ in range(config.num_transition_layers):
1672
+ l = EsmFoldStructureModuleTransitionLayer(config)
1673
+ self.layers.append(l)
1674
+
1675
+ self.dropout = nn.Dropout(config.dropout_rate)
1676
+ self.layer_norm = LayerNorm(config.sequence_dim)
1677
+
1678
+ def forward(self, s):
1679
+ for l in self.layers:
1680
+ s = l(s)
1681
+
1682
+ s = self.dropout(s)
1683
+ s = self.layer_norm(s)
1684
+
1685
+ return s
1686
+
1687
+
1688
+ class EsmFoldStructureModule(nn.Module):
1689
+ def __init__(self, config):
1690
+ super().__init__()
1691
+ self.config = config
1692
+
1693
+ # Buffers to be lazily initialized later
1694
+ # self.default_frames
1695
+ # self.group_idx
1696
+ # self.atom_mask
1697
+ # self.lit_positions
1698
+
1699
+ self.layer_norm_s = LayerNorm(config.sequence_dim)
1700
+ self.layer_norm_z = LayerNorm(config.pairwise_dim)
1701
+
1702
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim)
1703
+
1704
+ self.ipa = EsmFoldInvariantPointAttention(config)
1705
+
1706
+ self.ipa_dropout = nn.Dropout(config.dropout_rate)
1707
+ self.layer_norm_ipa = LayerNorm(config.sequence_dim)
1708
+
1709
+ self.transition = EsmFoldStructureModuleTransition(config)
1710
+ self.bb_update = EsmFoldBackboneUpdate(config)
1711
+ self.angle_resnet = EsmFoldAngleResnet(config)
1712
+
1713
+ def forward(
1714
+ self,
1715
+ evoformer_output_dict,
1716
+ aatype,
1717
+ mask=None,
1718
+ _offload_inference=False,
1719
+ ):
1720
+ """
1721
+ Args:
1722
+ evoformer_output_dict:
1723
+ Dictionary containing:
1724
+ "single":
1725
+ [*, N_res, C_s] single representation
1726
+ "pair":
1727
+ [*, N_res, N_res, C_z] pair representation
1728
+ aatype:
1729
+ [*, N_res] amino acid indices
1730
+ mask:
1731
+ Optional [*, N_res] sequence mask
1732
+ Returns:
1733
+ A dictionary of outputs
1734
+ """
1735
+ s = evoformer_output_dict["single"]
1736
+
1737
+ if mask is None:
1738
+ # [*, N]
1739
+ mask = s.new_ones(s.shape[:-1])
1740
+
1741
+ # [*, N, C_s]
1742
+ s = self.layer_norm_s(s)
1743
+
1744
+ # [*, N, N, C_z]
1745
+ z = self.layer_norm_z(evoformer_output_dict["pair"])
1746
+
1747
+ z_reference_list = None
1748
+ if _offload_inference:
1749
+ assert sys.getrefcount(evoformer_output_dict["pair"]) == 2
1750
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu()
1751
+ z_reference_list = [z]
1752
+ z = None
1753
+
1754
+ # [*, N, C_s]
1755
+ s_initial = s
1756
+ s = self.linear_in(s)
1757
+
1758
+ # [*, N]
1759
+ rigids = Rigid.identity(
1760
+ s.shape[:-1],
1761
+ s.dtype,
1762
+ s.device,
1763
+ self.training,
1764
+ fmt="quat",
1765
+ )
1766
+ outputs = []
1767
+ for i in range(self.config.num_blocks):
1768
+ # [*, N, C_s]
1769
+ s = s + self.ipa(
1770
+ s,
1771
+ z,
1772
+ rigids,
1773
+ mask,
1774
+ _offload_inference=_offload_inference,
1775
+ _z_reference_list=z_reference_list,
1776
+ )
1777
+ s = self.ipa_dropout(s)
1778
+ s = self.layer_norm_ipa(s)
1779
+ s = self.transition(s)
1780
+
1781
+ # [*, N]
1782
+ rigids = rigids.compose_q_update_vec(self.bb_update(s))
1783
+
1784
+ # To hew as closely as possible to AlphaFold, we convert our
1785
+ # quaternion-based transformations to rotation-matrix ones
1786
+ # here
1787
+ backb_to_global = Rigid(
1788
+ Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None),
1789
+ rigids.get_trans(),
1790
+ )
1791
+
1792
+ backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)
1793
+
1794
+ # [*, N, 7, 2]
1795
+ unnormalized_angles, angles = self.angle_resnet(s, s_initial)
1796
+
1797
+ all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)
1798
+
1799
+ pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
1800
+
1801
+ scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)
1802
+
1803
+ preds = {
1804
+ "frames": scaled_rigids.to_tensor_7(),
1805
+ "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
1806
+ "unnormalized_angles": unnormalized_angles,
1807
+ "angles": angles,
1808
+ "positions": pred_xyz,
1809
+ "states": s,
1810
+ }
1811
+
1812
+ outputs.append(preds)
1813
+
1814
+ rigids = rigids.stop_rot_gradient()
1815
+
1816
+ del z, z_reference_list
1817
+
1818
+ if _offload_inference:
1819
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device)
1820
+
1821
+ outputs = dict_multimap(torch.stack, outputs)
1822
+ outputs["single"] = s
1823
+
1824
+ return outputs
1825
+
1826
+ def _init_residue_constants(self, float_dtype, device):
1827
+ if not hasattr(self, "default_frames"):
1828
+ self.register_buffer(
1829
+ "default_frames",
1830
+ torch.tensor(
1831
+ residue_constants.restype_rigid_group_default_frame,
1832
+ dtype=float_dtype,
1833
+ device=device,
1834
+ requires_grad=False,
1835
+ ),
1836
+ persistent=False,
1837
+ )
1838
+ if not hasattr(self, "group_idx"):
1839
+ self.register_buffer(
1840
+ "group_idx",
1841
+ torch.tensor(
1842
+ residue_constants.restype_atom14_to_rigid_group,
1843
+ device=device,
1844
+ requires_grad=False,
1845
+ ),
1846
+ persistent=False,
1847
+ )
1848
+ if not hasattr(self, "atom_mask"):
1849
+ self.register_buffer(
1850
+ "atom_mask",
1851
+ torch.tensor(
1852
+ residue_constants.restype_atom14_mask,
1853
+ dtype=float_dtype,
1854
+ device=device,
1855
+ requires_grad=False,
1856
+ ),
1857
+ persistent=False,
1858
+ )
1859
+ if not hasattr(self, "lit_positions"):
1860
+ self.register_buffer(
1861
+ "lit_positions",
1862
+ torch.tensor(
1863
+ residue_constants.restype_atom14_rigid_group_positions,
1864
+ dtype=float_dtype,
1865
+ device=device,
1866
+ requires_grad=False,
1867
+ ),
1868
+ persistent=False,
1869
+ )
1870
+
1871
+ def torsion_angles_to_frames(self, r, alpha, f):
1872
+ # Lazily initialize the residue constants on the correct device
1873
+ self._init_residue_constants(alpha.dtype, alpha.device)
1874
+ # Separated purely to make testing less annoying
1875
+ return torsion_angles_to_frames(r, alpha, f, self.default_frames)
1876
+
1877
+ def frames_and_literature_positions_to_atom14_pos(self, r, f): # [*, N, 8] # [*, N]
1878
+ # Lazily initialize the residue constants on the correct device
1879
+ self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
1880
+ return frames_and_literature_positions_to_atom14_pos(
1881
+ r,
1882
+ f,
1883
+ self.default_frames,
1884
+ self.group_idx,
1885
+ self.atom_mask,
1886
+ self.lit_positions,
1887
+ )
1888
+
1889
+
1890
+ class EsmFoldingTrunk(nn.Module):
1891
+ def __init__(self, config):
1892
+ super().__init__()
1893
+ self.config = config
1894
+
1895
+ c_s = config.sequence_state_dim
1896
+ c_z = config.pairwise_state_dim
1897
+
1898
+ self.pairwise_positional_embedding = EsmFoldRelativePosition(config)
1899
+
1900
+ self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)])
1901
+
1902
+ self.recycle_bins = 15
1903
+ self.recycle_s_norm = nn.LayerNorm(c_s)
1904
+ self.recycle_z_norm = nn.LayerNorm(c_z)
1905
+ self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
1906
+ self.recycle_disto.weight[0].detach().zero_()
1907
+
1908
+ self.structure_module = EsmFoldStructureModule(config.structure_module)
1909
+ self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim)
1910
+ self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim)
1911
+
1912
+ self.chunk_size = config.chunk_size
1913
+
1914
+ def set_chunk_size(self, chunk_size):
1915
+ # This parameter means the axial attention will be computed
1916
+ # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
1917
+ # It's equivalent to running a for loop over chunks of the dimension we're iterating over,
1918
+ # where chunk_size is the length of each chunk, so 128 means the dimension is processed in 128-length chunks.
1919
+ self.chunk_size = chunk_size
1920
+
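A minimal standalone sketch of the chunking idea described in the comment above (not the trunk's internal implementation; `chunked_rowwise` and the softmax payload are illustrative): the work is applied to slices of one dimension and concatenated, so peak memory scales with the chunk length rather than the full dimension.

```python
import torch

def chunked_rowwise(fn, x, chunk_size=None, dim=0):
    # With chunk_size=None this is equivalent to fn(x); otherwise fn only ever
    # sees a chunk_size-long slice of `x` along `dim`, bounding peak memory.
    if chunk_size is None:
        return fn(x)
    return torch.cat([fn(piece) for piece in x.split(chunk_size, dim=dim)], dim=dim)

x = torch.randn(512, 64)
out = chunked_rowwise(lambda t: t.softmax(dim=-1), x, chunk_size=128)
assert torch.allclose(out, x.softmax(dim=-1))
```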
1921
+ def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
1922
+ """
1923
+ Inputs:
1924
+ seq_feats: B x L x C tensor of sequence features
1925
+ pair_feats: B x L x L x C tensor of pair features
+ true_aa: B x L tensor of amino acid indices
+ residx: B x L long tensor giving the position in the sequence
+ mask: B x L boolean tensor indicating valid residues
+ no_recycles: optional number of recycling passes (defaults to config.max_recycles)
1926
+
1927
+ Output:
1928
+ structure: a dictionary of structure-module outputs (frames, sidechain_frames, angles, positions, states, plus s_s and s_z)
1929
+ """
1930
+
1931
+ device = seq_feats.device
1932
+ s_s_0 = seq_feats
1933
+ s_z_0 = pair_feats
1934
+
1935
+ if no_recycles is None:
1936
+ no_recycles = self.config.max_recycles
1937
+ else:
1938
+ if no_recycles < 0:
1939
+ raise ValueError("Number of recycles must not be negative.")
1940
+ no_recycles += 1 # First 'recycle' is just the standard forward pass through the model.
1941
+
1942
+ def trunk_iter(s, z, residx, mask):
1943
+ z = z + self.pairwise_positional_embedding(residx, mask=mask)
1944
+
1945
+ for block in self.blocks:
1946
+ s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size)
1947
+ return s, z
1948
+
1949
+ s_s = s_s_0
1950
+ s_z = s_z_0
1951
+ recycle_s = torch.zeros_like(s_s)
1952
+ recycle_z = torch.zeros_like(s_z)
1953
+ recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)
1954
+
1955
+ for recycle_idx in range(no_recycles):
1956
+ with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]):
1957
+ # === Recycling ===
1958
+ recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device)
1959
+ recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device)
1960
+ recycle_z += self.recycle_disto(recycle_bins.detach()).to(device)
1961
+
1962
+ s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask)
1963
+
1964
+ # === Structure module ===
1965
+ structure = self.structure_module(
1966
+ {"single": self.trunk2sm_s(s_s), "pair": self.trunk2sm_z(s_z)},
1967
+ true_aa,
1968
+ mask.float(),
1969
+ )
1970
+
1971
+ recycle_s = s_s
1972
+ recycle_z = s_z
1973
+ # The distogram is computed from the N, CA, C coordinates, with the same bin constants as AlphaFold.
1974
+ recycle_bins = EsmFoldingTrunk.distogram(
1975
+ structure["positions"][-1][:, :, :3],
1976
+ 3.375,
1977
+ 21.375,
1978
+ self.recycle_bins,
1979
+ )
1980
+
1981
+ structure["s_s"] = s_s
1982
+ structure["s_z"] = s_z
1983
+
1984
+ return structure
1985
+
1986
+ @staticmethod
1987
+ def distogram(coords, min_bin, max_bin, num_bins):
1988
+ # coords is [..., L, 3, 3]: the N, CA and C backbone atoms, each with 3 spatial coordinates.
1989
+ boundaries = torch.linspace(
1990
+ min_bin,
1991
+ max_bin,
1992
+ num_bins - 1,
1993
+ device=coords.device,
1994
+ )
1995
+ boundaries = boundaries**2
1996
+ N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
1997
+ # Infer CB coordinates.
1998
+ b = CA - N
1999
+ c = C - CA
2000
+ a = b.cross(c, dim=-1)
2001
+ CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
2002
+ dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True)
2003
+ bins = torch.sum(dists > boundaries, dim=-1) # [..., L, L]
2004
+ return bins
2005
+
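The binning above can be checked with a small standalone sketch that mirrors `distogram` (shapes and bin constants taken from the call in `forward`; the random coordinates are illustrative): squared virtual-CB distances are compared against squared bin boundaries, giving an integer bin index per residue pair.

```python
import torch

coords = torch.randn(1, 10, 3, 3)                        # [B, L, (N, CA, C), xyz]
boundaries = torch.linspace(3.375, 21.375, 15 - 1) ** 2  # squared bin edges
N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
b, c = CA - N, C - CA
CB = -0.58273431 * torch.cross(b, c, dim=-1) + 0.56802827 * b - 0.54067466 * c + CA
dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdim=True)
bins = torch.sum(dists > boundaries, dim=-1)             # [B, L, L], values in [0, 14]
```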
2006
+
2007
+ # TODO Add information to the docstring about any methods that convert to PDB format, or otherwise prepare
2008
+ # the outputs for downstream use.
2009
+
2010
+
2011
+ @add_start_docstrings(
2012
+ """
2013
+ ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed
2014
+ by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to
2015
+ the rest of the model combined! It outputs a dictionary containing predicted structural information about the input
2016
+ protein(s).
2017
+ """,
2018
+ ESM_START_DOCSTRING,
2019
+ )
2020
+ class EsmForProteinFolding(EsmPreTrainedModel):
2021
+ _no_split_modules = ["EsmFoldStructureModule", "EsmFoldTriangularSelfAttentionBlock"]
2022
+
2023
+ def __init__(self, config):
2024
+ super().__init__(config)
2025
+
2026
+ self.config = config
2027
+
2028
+ self.distogram_bins = 64
2029
+
2030
+ self.esm = EsmModel(config, add_pooling_layer=False)
2031
+
2032
+ self.esm.requires_grad_(False)
2033
+ if self.config.esmfold_config.fp16_esm:
2034
+ self.esm.half()
2035
+
2036
+ self.esm_feats = self.config.hidden_size
2037
+ self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads
2038
+ self.esm_layers = self.config.num_hidden_layers
2039
+ self.register_buffer("af2_to_esm", self._af2_to_esm_from_vocab_list(config.vocab_list))
2040
+ self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1))
2041
+
2042
+ trunk_config = self.config.esmfold_config.trunk
2043
+ c_s = trunk_config.sequence_state_dim
2044
+ c_z = trunk_config.pairwise_state_dim
2045
+ self.esm_s_mlp = nn.Sequential(
2046
+ LayerNorm(self.esm_feats),
2047
+ nn.Linear(self.esm_feats, c_s),
2048
+ nn.ReLU(),
2049
+ nn.Linear(c_s, c_s),
2050
+ )
2051
+
2052
+ # 0 is padding, N is unknown residues, N + 1 is mask.
2053
+ self.n_tokens_embed = residue_constants.restype_num + 3
2054
+ self.pad_idx = 0
2055
+ self.unk_idx = self.n_tokens_embed - 2
2056
+ self.mask_idx = self.n_tokens_embed - 1
2057
+ self.esm_dict_cls_idx = self.config.vocab_list.index("<cls>")
2058
+ self.esm_dict_mask_idx = self.config.vocab_list.index("<mask>")
2059
+ self.esm_dict_eos_idx = self.config.vocab_list.index("<eos>")
2060
+ self.esm_dict_padding_idx = self.config.vocab_list.index("<pad>")
2061
+ if self.config.esmfold_config.embed_aa:
2062
+ self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)
2063
+
2064
+ self.trunk = EsmFoldingTrunk(trunk_config)
2065
+
2066
+ self.distogram_head = nn.Linear(c_z, self.distogram_bins)
2067
+ self.ptm_head = nn.Linear(c_z, self.distogram_bins)
2068
+ self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
2069
+ self.lddt_bins = 50
2070
+ structure_module_config = trunk_config.structure_module
2071
+ self.lddt_head = nn.Sequential(
2072
+ nn.LayerNorm(structure_module_config.sequence_dim),
2073
+ nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim),
2074
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim),
2075
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins),
2076
+ )
2077
+
2078
+ @staticmethod
2079
+ def _af2_to_esm_from_vocab_list(vocab_list: List[str]) -> torch.Tensor:
2080
+ # Remember that the returned mapping is shifted from residue_constants by 1 (0 is padding).
2081
+ esm_reorder = [vocab_list.index("<pad>")] + [vocab_list.index(v) for v in residue_constants.restypes_with_x]
2082
+ return torch.tensor(esm_reorder)
2083
+
2084
+ @add_start_docstrings_to_model_forward(ESMFOLD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
2085
+ @replace_return_docstrings(output_type=EsmForProteinFoldingOutput, config_class=EsmConfig)
2086
+ def forward(
2087
+ self,
2088
+ input_ids: torch.Tensor,
2089
+ attention_mask: Optional[torch.Tensor] = None,
2090
+ position_ids: Optional[torch.Tensor] = None,
2091
+ masking_pattern: Optional[torch.Tensor] = None,
2092
+ num_recycles: Optional[int] = None,
2093
+ ) -> EsmForProteinFoldingOutput:
2094
+ r"""
2095
+ Returns:
2096
+
2097
+ Example:
2098
+
2099
+ ```python
2100
+ >>> from transformers import AutoTokenizer, EsmForProteinFolding
2101
+
2102
+ >>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
2103
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
2104
+ >>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide
2105
+ >>> outputs = model(**inputs)
2106
+ >>> folded_positions = outputs.positions
2107
+ ```
2108
+
2109
+ """
2110
+ cfg = self.config.esmfold_config
2111
+
2112
+ aa = input_ids # B x L
2113
+ B = aa.shape[0]
2114
+ L = aa.shape[1]
2115
+ device = input_ids.device
2116
+ if attention_mask is None:
2117
+ attention_mask = torch.ones_like(aa, device=device)
2118
+ if position_ids is None:
2119
+ position_ids = torch.arange(L, device=device).expand_as(input_ids)
2120
+
2121
+ # === ESM ===
2122
+ esmaa = self.af2_idx_to_esm_idx(aa, attention_mask)
2123
+
2124
+ if masking_pattern is not None:
2125
+ masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern)
2126
+ else:
2127
+ masked_aa = aa
2128
+ mlm_targets = None
2129
+
2130
+ # We get sequence and pair representations from whatever version of ESM /
2131
+ # configuration we are using. The sequence representation esm_s is always
2132
+ # present. The pair embedding esm_z may be present depending on the
2133
+ # configuration of the model. If esm_z is not used by the model then it
2134
+ # is returned as None here.
2135
+ esm_s = self.compute_language_model_representations(esmaa)
2136
+
2137
+ # Convert esm_s and esm_z, if present, to the precision used by the trunk and
2138
+ # the structure module. These tensors may be a lower precision if, for example,
2139
+ # we're running the language model in fp16 precision.
2140
+ esm_s = esm_s.to(self.esm_s_combine.dtype)
2141
+
2142
+ if cfg.esm_ablate_sequence:
2143
+ esm_s = esm_s * 0
2144
+
2145
+ esm_s = esm_s.detach()
2146
+
2147
+ # === preprocessing ===
2148
+ esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
2149
+ s_s_0 = self.esm_s_mlp(esm_s)
2150
+
2151
+ s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim)
2152
+
2153
+ if self.config.esmfold_config.embed_aa:
2154
+ s_s_0 += self.embedding(masked_aa)
2155
+
2156
+ structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
2157
+ # Documenting what we expect:
2158
+ structure = {
2159
+ k: v
2160
+ for k, v in structure.items()
2161
+ if k
2162
+ in [
2163
+ "s_z",
2164
+ "s_s",
2165
+ "frames",
2166
+ "sidechain_frames",
2167
+ "unnormalized_angles",
2168
+ "angles",
2169
+ "positions",
2170
+ "states",
2171
+ ]
2172
+ }
2173
+
2174
+ # Add BERT mask for the loss to use, if available.
2175
+ if mlm_targets is not None:
2176
+ structure["mlm_targets"] = mlm_targets
2177
+
2178
+ disto_logits = self.distogram_head(structure["s_z"])
2179
+ disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
2180
+ structure["distogram_logits"] = disto_logits
2181
+
2182
+ lm_logits = self.lm_head(structure["s_s"])
2183
+ structure["lm_logits"] = lm_logits
2184
+
2185
+ structure["aatype"] = aa
2186
+ make_atom14_masks(structure)
2187
+ # Of course, this doesn't respect the true mask because it doesn't know about it...
2188
+ # We're not going to properly mask change of index tensors:
2189
+ # "residx_atom14_to_atom37",
2190
+ # "residx_atom37_to_atom14",
2191
+ for k in [
2192
+ "atom14_atom_exists",
2193
+ "atom37_atom_exists",
2194
+ ]:
2195
+ structure[k] *= attention_mask.unsqueeze(-1)
2196
+ structure["residue_index"] = position_ids
2197
+
2198
+ lddt_head = self.lddt_head(structure["states"]).reshape(structure["states"].shape[0], B, L, -1, self.lddt_bins)
2199
+ structure["lddt_head"] = lddt_head
2200
+ plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
2201
+ structure["plddt"] = plddt
2202
+
2203
+ ptm_logits = self.ptm_head(structure["s_z"])
2204
+ structure["ptm_logits"] = ptm_logits
2205
+ structure["ptm"] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins)
2206
+ structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins))
2207
+
2208
+ return EsmForProteinFoldingOutput(**structure)
2209
+
2210
+ def af2_idx_to_esm_idx(self, aa, mask):
2211
+ # avoid indexing on different devices
2212
+ if self.af2_to_esm.device != aa.device:
2213
+ self.af2_to_esm = self.af2_to_esm.to(aa.device)
2214
+ aa = (aa + 1).masked_fill(mask != 1, 0)
2215
+ return self.af2_to_esm[aa]
2216
+
2217
+ def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
2218
+ device = next(self.parameters()).device
2219
+ B, L = esmaa.shape # B = batch size, L = sequence length.
2220
+
2221
+ if self.config.esmfold_config.bypass_lm:
2222
+ esm_s = torch.zeros(B, L, self.esm_s_combine.size(0), self.esm_feats, device=device)  # [B, L, nLayers + 1, C]
2223
+ return esm_s
2224
+
2225
+ bosi, eosi = self.esm_dict_cls_idx, self.esm_dict_eos_idx
2226
+ bos = esmaa.new_full((B, 1), bosi)
2227
+ eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx)
2228
+ esmaa = torch.cat([bos, esmaa, eos], dim=1)
2229
+ # Use the first padding index as eos during inference.
2230
+ esmaa[range(B), (esmaa != 1).sum(1)] = eosi
2231
+
2232
+ # _, esm_z, esm_s = self.esm(esmaa, return_pairs=self.config.esmfold_config.use_esm_attn_map)
2233
+ # Because we do not support use_esm_attn_map in the HF port as it is not used in any public models,
2234
+ # esm_z is always None
2235
+ esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)["hidden_states"]
2236
+ esm_s = torch.stack(esm_hidden_states, dim=2)
2237
+
2238
+ esm_s = esm_s[:, 1:-1] # B, L, nLayers, C
2239
+
2240
+ return esm_s
2241
+
2242
+ def bert_mask(self, aa, esmaa, mask, pattern):
2243
+ new_aa = aa.clone()
2244
+ target = aa.clone()
2245
+ new_esmaa = esmaa.clone()
2246
+ new_aa[pattern == 1] = self.mask_idx
2247
+ target[pattern != 1] = 0
2248
+ new_esmaa[pattern == 1] = self.esm_dict_mask_idx
2249
+ return new_aa, new_esmaa, target
2250
+
2251
+ @torch.no_grad()
2252
+ def infer(
2253
+ self,
2254
+ seqs: Union[str, List[str]],
2255
+ position_ids=None,
2256
+ ):
2257
+ if isinstance(seqs, str):
2258
+ lst = [seqs]
2259
+ else:
2260
+ lst = seqs
2261
+ # Returns the raw outputs of the model given an input sequence.
2262
+ device = next(self.parameters()).device
2263
+ aatype = collate_dense_tensors(
2264
+ [
2265
+ torch.from_numpy(
2266
+ residue_constants.sequence_to_onehot(
2267
+ sequence=seq,
2268
+ mapping=residue_constants.restype_order_with_x,
2269
+ map_unknown_to_x=True,
2270
+ )
2271
+ )
2272
+ .to(device)
2273
+ .argmax(dim=1)
2274
+ for seq in lst
2275
+ ]
2276
+ ) # B=1 x L
2277
+ mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst])
2278
+ position_ids = (
2279
+ torch.arange(aatype.shape[1], device=device).expand(len(lst), -1)
2280
+ if position_ids is None
2281
+ else position_ids.to(device)
2282
+ )
2283
+ if position_ids.ndim == 1:
2284
+ position_ids = position_ids.unsqueeze(0)
2285
+ return self.forward(
2286
+ aatype,
2287
+ mask,
2288
+ position_ids=position_ids,
2289
+ )
2290
+
2291
+ @staticmethod
2292
+ def output_to_pdb(output: Dict) -> List[str]:
2293
+ """Returns the pbd (file) string from the model given the model output."""
2294
+ output = {k: v.to("cpu").numpy() for k, v in output.items()}
2295
+ pdbs = []
2296
+ final_atom_positions = atom14_to_atom37(output["positions"][-1], output)
2297
+ final_atom_mask = output["atom37_atom_exists"]
2298
+ for i in range(output["aatype"].shape[0]):
2299
+ aa = output["aatype"][i]
2300
+ pred_pos = final_atom_positions[i]
2301
+ mask = final_atom_mask[i]
2302
+ resid = output["residue_index"][i] + 1
2303
+ pred = OFProtein(
2304
+ aatype=aa,
2305
+ atom_positions=pred_pos,
2306
+ atom_mask=mask,
2307
+ residue_index=resid,
2308
+ b_factors=output["plddt"][i],
2309
+ )
2310
+ pdbs.append(to_pdb(pred))
2311
+ return pdbs
2312
+
2313
+ def infer_pdb(self, seqs, *args, **kwargs) -> str:
2314
+ """Returns the pdb (file) string from the model given an input sequence."""
2315
+ assert isinstance(seqs, str)
2316
+ output = self.infer(seqs, *args, **kwargs)
2317
+ return self.output_to_pdb(output)[0]
2318
+
2319
+ def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]:
2320
+ """Returns the pdb (file) string from the model given an input sequence."""
2321
+ output = self.infer(seqs, *args, **kwargs)
2322
+ return self.output_to_pdb(output)
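A hedged end-to-end usage sketch of the helpers defined above (the checkpoint name matches the docstring example earlier in this file; the sequence and output path are illustrative):

```python
import torch
from transformers import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
model.eval()

with torch.no_grad():
    pdb_string = model.infer_pdb("MLKNVQVQLV")  # single-sequence convenience wrapper

with open("prediction.pdb", "w") as f:
    f.write(pdb_string)
```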
venv/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py ADDED
@@ -0,0 +1,1567 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ESM model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import os
21
+ from typing import Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+
26
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
27
+ from ...modeling_tf_outputs import (
28
+ TFBaseModelOutputWithPastAndCrossAttentions,
29
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
30
+ TFMaskedLMOutput,
31
+ TFSequenceClassifierOutput,
32
+ TFTokenClassifierOutput,
33
+ )
34
+ from ...modeling_tf_utils import (
35
+ TFMaskedLanguageModelingLoss,
36
+ TFModelInputType,
37
+ TFPreTrainedModel,
38
+ TFSequenceClassificationLoss,
39
+ TFTokenClassificationLoss,
40
+ get_initializer,
41
+ keras,
42
+ shape_list,
43
+ unpack_inputs,
44
+ )
45
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
46
+ from ...utils import logging
47
+ from .configuration_esm import EsmConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
53
+ _CONFIG_FOR_DOC = "EsmConfig"
54
+
55
+
56
+ def rotate_half(x):
57
+ x1, x2 = tf.split(x, 2, axis=-1)
58
+ return tf.concat((-x2, x1), axis=-1)
59
+
60
+
61
+ def apply_rotary_pos_emb(x, cos, sin):
62
+ cos = cos[:, :, : tf.shape(x)[-2], :]
63
+ sin = sin[:, :, : tf.shape(x)[-2], :]
64
+
65
+ return (x * cos) + (rotate_half(x) * sin)
66
+
67
+
68
+ def symmetrize(x):
69
+ "Make layer symmetric in final two dimensions, used for contact prediction."
70
+ return x + tf.linalg.matrix_transpose(x) # Transposes last two dimensions only
71
+
72
+
73
+ def average_product_correct(x):
74
+ "Perform average product correct, used for contact prediction."
75
+ a1 = tf.reduce_sum(x, -1, keepdims=True)
76
+ a2 = tf.reduce_sum(x, -2, keepdims=True)
77
+ a12 = tf.reduce_sum(x, (-1, -2), keepdims=True)
78
+
79
+ avg = a1 * a2
80
+ avg = avg / a12
81
+ normalized = x - avg
82
+ return normalized
83
+
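The two helpers above compose as follows; a minimal sketch on random data (the shapes are illustrative, matching the stacked attention maps fed to the contact head):

```python
import tensorflow as tf

x = tf.random.normal((1, 12, 10, 10))       # [batch, layers * heads, L, L]
sym = x + tf.linalg.matrix_transpose(x)     # symmetrize the last two dimensions
a1 = tf.reduce_sum(sym, -1, keepdims=True)
a2 = tf.reduce_sum(sym, -2, keepdims=True)
a12 = tf.reduce_sum(sym, (-1, -2), keepdims=True)
apc = sym - a1 * a2 / a12                   # average product correction
```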
84
+
85
+ class TFRotaryEmbedding(keras.layers.Layer):
86
+ """
87
+ Rotary position embeddings based on those in
88
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Queries and keys are transformed by rotation
89
+ matrices which depend on their relative positions.
90
+ """
91
+
92
+ def __init__(self, dim: int, name=None):
93
+ super().__init__(name=name)
94
+ # Matt: The PyTorch version of this layer does a lot of work to cache values, but we just rely on TF compilation
95
+ # and/or XLA to sort out constants like that. It actually may not seem like this layer needs to be stateful at
96
+ # all when we benefit from TF compilation, but it does. The reason is that self.inv_freq is a buffer in the
97
+ # original implementation, but all the shared ESM checkpoints were trained with fp16 params. This means that
98
+ # the inv_freq tensor was stored as a float16, and we need to replicate those lower-precision values or our
99
+ # models give different outputs from the original.
100
+ self.dim = dim
101
+
102
+ def build(self, input_shape):
103
+ super().build(input_shape)
104
+ self.inv_freq = self.add_weight(
105
+ "inv_freq", shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False
106
+ )
107
+ self.inv_freq.assign(
108
+ 1.0 / (10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim))
109
+ )
110
+
111
+ def _compute_cos_sin(self, x, seq_dimension=2):
112
+ seq_len = tf.shape(x)[seq_dimension]
113
+
114
+ t = tf.range(seq_len, dtype=self.inv_freq.dtype)
115
+ freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication
116
+ emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :]
117
+
118
+ return tf.cos(emb), tf.sin(emb)
119
+
120
+ def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
121
+ cos_emb, sin_emb = self._compute_cos_sin(k, seq_dimension=-2)
122
+
123
+ return (
124
+ apply_rotary_pos_emb(q, cos_emb, sin_emb),
125
+ apply_rotary_pos_emb(k, cos_emb, sin_emb),
126
+ )
127
+
128
+
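A small numeric sketch (not the layer itself) of how `_compute_cos_sin` and `apply_rotary_pos_emb` combine; `dim` and the tensor shapes are illustrative:

```python
import tensorflow as tf

def rotate_half(x):
    # Mirrors the module-level helper above.
    x1, x2 = tf.split(x, 2, axis=-1)
    return tf.concat((-x2, x1), axis=-1)

dim = 4
inv_freq = 1.0 / (10000 ** (tf.range(0, dim, 2, dtype=tf.float32) / dim))
t = tf.range(3, dtype=tf.float32)                     # positions 0, 1, 2
freqs = tf.einsum("i,j->ij", t, inv_freq)
emb = tf.concat((freqs, freqs), axis=-1)[None, None]  # [1, 1, seq, dim]
q = tf.random.normal((1, 1, 3, dim))                  # [batch, heads, seq, dim]
q_rot = q * tf.cos(emb) + rotate_half(q) * tf.sin(emb)
```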
129
+ class TFEsmContactPredictionHead(keras.layers.Layer):
130
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
131
+
132
+ def __init__(
133
+ self,
134
+ in_features: int,
135
+ bias=True,
136
+ eos_idx: int = 2,
137
+ name=None,
138
+ ):
139
+ super().__init__(name=name)
140
+ self.eos_idx = eos_idx
141
+ self.in_features = in_features
142
+ self.regression = keras.layers.Dense(1, use_bias=bias, activation="sigmoid", name="regression")
143
+
144
+ def build(self, input_shape=None):
145
+ if self.built:
146
+ return
147
+ self.built = True
148
+ if getattr(self, "regression", None) is not None:
149
+ with tf.name_scope(self.regression.name):
150
+ self.regression.build((None, self.in_features))
151
+
152
+ def call(self, tokens, attentions):
153
+ # remove eos token attentions
154
+ eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype)
155
+ eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2)
156
+ attentions = attentions * eos_mask[:, None, None, :, :]
157
+ attentions = attentions[..., :-1, :-1]
158
+ # remove cls token attentions
159
+ attentions = attentions[..., 1:, 1:]
160
+ batch_size, layers, heads, seqlen, _ = shape_list(attentions)
161
+ attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen))
162
+
163
+ # features: batch x channels x tokens x tokens (symmetric)
164
+ attentions = average_product_correct(symmetrize(attentions))
165
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
166
+ return tf.squeeze(self.regression(attentions), 3)
167
+
168
+
169
+ class TFEsmEmbeddings(keras.layers.Layer):
170
+ """
171
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
172
+ """
173
+
174
+ def __init__(self, config, name=None):
175
+ super().__init__(name=name)
176
+ self.word_embeddings = keras.layers.Embedding(
177
+ config.vocab_size,
178
+ config.hidden_size,
179
+ embeddings_initializer=get_initializer(config.initializer_range),
180
+ name="word_embeddings",
181
+ )
182
+ self.position_embeddings = keras.layers.Embedding(
183
+ config.max_position_embeddings,
184
+ config.hidden_size,
185
+ embeddings_initializer=get_initializer(config.initializer_range),
186
+ name="position_embeddings",
187
+ )
188
+
189
+ if config.emb_layer_norm_before:
190
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
191
+ else:
192
+ self.layer_norm = None
193
+ # Matt: I think this line was copied incorrectly from BERT, disabling for now
194
+ # self.dropout = Dropout(config.hidden_dropout_prob)
195
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
196
+
197
+ self.position_ids = tf.range(config.max_position_embeddings)[None, :]
198
+
199
+ self.padding_idx = config.pad_token_id
200
+ self.token_dropout = config.token_dropout
201
+ self.mask_token_id = config.mask_token_id
202
+ self.config = config
203
+
204
+ def call(
205
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
206
+ ):
207
+ if position_ids is None:
208
+ if input_ids is not None:
209
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
210
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
211
+ else:
212
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
213
+
214
+ if inputs_embeds is None:
215
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
216
+ inputs_embeds = self.word_embeddings(input_ids)
217
+
218
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
219
+ # embedding_scale factor here.
220
+ embeddings = inputs_embeds
221
+
222
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
223
+ # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
224
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
225
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
226
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
227
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
228
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
229
+ if self.token_dropout:
230
+ embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings)
231
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
232
+ src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32)
233
+ masked_tokens = input_ids == self.mask_token_id
234
+ mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths
235
+ embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
236
+
237
+ if self.position_embedding_type == "absolute":
238
+ position_embeddings = self.position_embeddings(position_ids)
239
+ embeddings += position_embeddings
240
+
241
+ if self.layer_norm is not None:
242
+ embeddings = self.layer_norm(embeddings)
243
+ if attention_mask is not None:
244
+ embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype)
245
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
246
+ # embeddings = self.dropout(embeddings)
247
+ return embeddings
248
+
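A small numeric sketch of the "mask-dropout" rescaling described in the comment inside `call` above; the token ids and embedding width are illustrative:

```python
import tensorflow as tf

mask_token_id = 32                                  # illustrative id
input_ids = tf.constant([[5, 32, 7, 8]])            # one of four tokens is masked
attention_mask = tf.ones_like(input_ids)
embeddings = tf.random.normal((1, 4, 8))

embeddings = tf.where((input_ids == mask_token_id)[:, :, None], 0.0, embeddings)
mask_ratio_train = 0.15 * 0.8                       # constant used in ESM training
src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32)
masked = input_ids == mask_token_id
mask_ratio_observed = tf.math.count_nonzero(masked, dtype=tf.float32, axis=-1) / src_lengths
embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
```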
249
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
250
+ """
251
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
252
+
253
+ Args:
254
+ inputs_embeds: tf.Tensor
255
+
256
+ Returns: tf.Tensor
257
+ """
258
+ input_shape = shape_list(inputs_embeds)[:-1]
259
+ sequence_length = input_shape[1]
260
+
261
+ position_ids = tf.range(
262
+ start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64
263
+ )
264
+ return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape)
265
+
266
+ def build(self, input_shape=None):
267
+ if self.built:
268
+ return
269
+ self.built = True
270
+ if getattr(self, "word_embeddings", None) is not None:
271
+ with tf.name_scope(self.word_embeddings.name):
272
+ self.word_embeddings.build(None)
273
+ if getattr(self, "position_embeddings", None) is not None:
274
+ with tf.name_scope(self.position_embeddings.name):
275
+ self.position_embeddings.build(None)
276
+ if getattr(self, "layer_norm", None) is not None:
277
+ with tf.name_scope(self.layer_norm.name):
278
+ self.layer_norm.build([None, None, self.config.hidden_size])
279
+
280
+
281
+ class TFEsmSelfAttention(keras.layers.Layer):
282
+ def __init__(self, config, position_embedding_type=None, name=None):
283
+ super().__init__(name=name)
284
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
285
+ raise ValueError(
286
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
287
+ f"heads ({config.num_attention_heads})"
288
+ )
289
+
290
+ self.num_attention_heads = config.num_attention_heads
291
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
292
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
293
+
294
+ self.query = keras.layers.Dense(
295
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
296
+ )
297
+ self.key = keras.layers.Dense(
298
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
299
+ )
300
+ self.value = keras.layers.Dense(
301
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
302
+ )
303
+
304
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
305
+ self.position_embedding_type = position_embedding_type or getattr(
306
+ config, "position_embedding_type", "absolute"
307
+ )
308
+ self.rotary_embeddings = None
309
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
310
+ self.max_position_embeddings = config.max_position_embeddings
311
+ self.distance_embedding = keras.layers.Embedding(
312
+ 2 * config.max_position_embeddings - 1,
313
+ self.attention_head_size,
314
+ embeddings_initializer=get_initializer(config.initializer_range),
315
+ )
316
+ elif self.position_embedding_type == "rotary":
317
+ self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name="rotary_embeddings")
318
+
319
+ self.is_decoder = config.is_decoder
320
+ self.config = config
321
+
322
+ def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
323
+ new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size]
324
+ x = tf.reshape(x, new_x_shape)
325
+ return tf.transpose(x, perm=(0, 2, 1, 3))
326
+
327
+ def call(
328
+ self,
329
+ hidden_states: tf.Tensor,
330
+ attention_mask: tf.Tensor | None = None,
331
+ head_mask: tf.Tensor | None = None,
332
+ encoder_hidden_states: tf.Tensor | None = None,
333
+ encoder_attention_mask: tf.Tensor | None = None,
334
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
335
+ output_attentions: Optional[bool] = False,
336
+ training: bool = False,
337
+ ) -> Tuple[tf.Tensor]:
338
+ mixed_query_layer = self.query(hidden_states)
339
+
340
+ # If this is instantiated as a cross-attention module, the keys
341
+ # and values come from an encoder; the attention mask needs to be
342
+ # such that the encoder's padding tokens are not attended to.
343
+ is_cross_attention = encoder_hidden_states is not None
344
+
345
+ if is_cross_attention and past_key_value is not None:
346
+ # reuse k,v, cross_attentions
347
+ key_layer = past_key_value[0]
348
+ value_layer = past_key_value[1]
349
+ attention_mask = encoder_attention_mask
350
+ elif is_cross_attention:
351
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
352
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
353
+ attention_mask = encoder_attention_mask
354
+ elif past_key_value is not None:
355
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
356
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
357
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
358
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
359
+ else:
360
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
361
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
362
+
363
+ query_layer = self.transpose_for_scores(mixed_query_layer)
364
+
365
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
366
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
367
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
368
+ # ESM code and fix rotary embeddings.
369
+ query_layer = query_layer * self.attention_head_size**-0.5
370
+
371
+ if self.is_decoder:
372
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
373
+ # Further calls to cross_attention layer can then reuse all cross-attention
374
+ # key/value_states (first "if" case)
375
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
376
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
377
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
378
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
379
+ past_key_value = (key_layer, value_layer)
380
+
381
+ if self.position_embedding_type == "rotary":
382
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
383
+
384
+ # Take the dot product between "query" and "key" to get the raw attention scores.
385
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
386
+
387
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
388
+ seq_length = shape_list(hidden_states)[1]
389
+ position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1)
390
+ position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0)
391
+ distance = position_ids_l - position_ids_r
392
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
393
+ positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility
394
+
395
+ if self.position_embedding_type == "relative_key":
396
+ relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
397
+ attention_scores = attention_scores + relative_position_scores
398
+ elif self.position_embedding_type == "relative_key_query":
399
+ relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
400
+ relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
401
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
402
+
403
+ if attention_mask is not None:
404
+ # Apply the attention mask is (precomputed for all layers in EsmModel forward() function)
405
+ attention_scores = attention_scores + attention_mask
406
+
407
+ # Normalize the attention scores to probabilities.
408
+ attention_probs = stable_softmax(attention_scores, axis=-1)
409
+
410
+ # This is actually dropping out entire tokens to attend to, which might
411
+ # seem a bit unusual, but is taken from the original Transformer paper.
412
+ attention_probs = self.dropout(attention_probs, training=training)
413
+
414
+ # Mask heads if we want to
415
+ if head_mask is not None:
416
+ attention_probs = attention_probs * head_mask
417
+
418
+ context_layer = attention_probs @ value_layer
419
+
420
+ context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3))
421
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size]
422
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
423
+
424
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
425
+
426
+ if self.is_decoder:
427
+ outputs = outputs + (past_key_value,)
428
+ return outputs
429
+
430
+ def build(self, input_shape=None):
431
+ if self.built:
432
+ return
433
+ self.built = True
434
+ if getattr(self, "query", None) is not None:
435
+ with tf.name_scope(self.query.name):
436
+ self.query.build([None, None, self.config.hidden_size])
437
+ if getattr(self, "key", None) is not None:
438
+ with tf.name_scope(self.key.name):
439
+ self.key.build([None, None, self.config.hidden_size])
440
+ if getattr(self, "value", None) is not None:
441
+ with tf.name_scope(self.value.name):
442
+ self.value.build([None, None, self.config.hidden_size])
443
+ if getattr(self, "rotary_embeddings", None) is not None:
444
+ with tf.name_scope(self.rotary_embeddings.name):
445
+ self.rotary_embeddings.build(None)
446
+
447
+
448
+ class TFEsmSelfOutput(keras.layers.Layer):
449
+ def __init__(self, config, name=None):
450
+ super().__init__(name=name)
451
+ self.dense = keras.layers.Dense(
452
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
453
+ )
454
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
455
+ self.config = config
456
+
457
+ def call(self, hidden_states, input_tensor, training=False):
458
+ hidden_states = self.dense(hidden_states)
459
+ hidden_states = self.dropout(hidden_states, training=training)
460
+ hidden_states += input_tensor
461
+ return hidden_states
462
+
463
+ def build(self, input_shape=None):
464
+ if self.built:
465
+ return
466
+ self.built = True
467
+ if getattr(self, "dense", None) is not None:
468
+ with tf.name_scope(self.dense.name):
469
+ self.dense.build([None, None, self.config.hidden_size])
470
+
471
+
472
+ class TFEsmAttention(keras.layers.Layer):
473
+ def __init__(self, config, name=None):
474
+ super().__init__(name=name)
475
+ self.self = TFEsmSelfAttention(config, name="self")
476
+ self.output_layer = TFEsmSelfOutput(config, name="output")
477
+ self.pruned_heads = set()
478
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
479
+ self.config = config
480
+
481
+ def prune_heads(self, heads):
482
+ raise NotImplementedError
483
+
484
+ def call(
485
+ self,
486
+ hidden_states,
487
+ attention_mask=None,
488
+ head_mask=None,
489
+ encoder_hidden_states=None,
490
+ encoder_attention_mask=None,
491
+ past_key_value=None,
492
+ output_attentions=False,
493
+ training=False,
494
+ ):
495
+ hidden_states_ln = self.LayerNorm(hidden_states)
496
+ self_outputs = self.self(
497
+ hidden_states_ln,
498
+ attention_mask,
499
+ head_mask,
500
+ encoder_hidden_states,
501
+ encoder_attention_mask,
502
+ past_key_value,
503
+ output_attentions,
504
+ training,
505
+ )
506
+ attention_output = self.output_layer(self_outputs[0], hidden_states)
507
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
508
+ return outputs
509
+
510
+ def build(self, input_shape=None):
511
+ if self.built:
512
+ return
513
+ self.built = True
514
+ if getattr(self, "self", None) is not None:
515
+ with tf.name_scope(self.self.name):
516
+ self.self.build(None)
517
+ if getattr(self, "output_layer", None) is not None:
518
+ with tf.name_scope(self.output_layer.name):
519
+ self.output_layer.build(None)
520
+ if getattr(self, "LayerNorm", None) is not None:
521
+ with tf.name_scope(self.LayerNorm.name):
522
+ self.LayerNorm.build([None, None, self.config.hidden_size])
523
+
524
+
525
+ class TFEsmIntermediate(keras.layers.Layer):
526
+ def __init__(self, config: EsmConfig, **kwargs):
527
+ super().__init__(**kwargs)
528
+
529
+ self.dense = keras.layers.Dense(
530
+ units=config.intermediate_size,
531
+ kernel_initializer=get_initializer(config.initializer_range),
532
+ name="dense",
533
+ )
534
+ self.config = config
535
+
536
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
537
+ hidden_states = self.dense(inputs=hidden_states)
538
+ hidden_states = tf.nn.gelu(hidden_states)
539
+ return hidden_states
540
+
541
+ def build(self, input_shape=None):
542
+ if self.built:
543
+ return
544
+ self.built = True
545
+ if getattr(self, "dense", None) is not None:
546
+ with tf.name_scope(self.dense.name):
547
+ self.dense.build([None, None, self.config.hidden_size])
548
+
549
+
550
+ class TFEsmOutput(keras.layers.Layer):
551
+ def __init__(self, config, name=None):
552
+ super().__init__(name=name)
553
+ self.dense = keras.layers.Dense(
554
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
555
+ )
556
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
557
+ self.config = config
558
+
559
+ def call(self, hidden_states, input_tensor, training=False):
560
+ hidden_states = self.dense(hidden_states)
561
+ hidden_states = self.dropout(hidden_states, training=training)
562
+ hidden_states += input_tensor
563
+ return hidden_states
564
+
565
+ def build(self, input_shape=None):
566
+ if self.built:
567
+ return
568
+ self.built = True
569
+ if getattr(self, "dense", None) is not None:
570
+ with tf.name_scope(self.dense.name):
571
+ self.dense.build([None, None, self.config.intermediate_size])
572
+
573
+
574
+ class TFEsmLayer(keras.layers.Layer):
575
+ def __init__(self, config, name=None):
576
+ super().__init__(name=name)
577
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
578
+ self.seq_len_dim = 1
579
+ self.attention = TFEsmAttention(config, name="attention")
580
+ self.is_decoder = config.is_decoder
581
+ self.add_cross_attention = config.add_cross_attention
582
+ if self.add_cross_attention:
583
+ if not self.is_decoder:
584
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
585
+ self.crossattention = TFEsmAttention(config)
586
+ self.intermediate = TFEsmIntermediate(config, name="intermediate")
587
+ self.output_layer = TFEsmOutput(config, name="output")
588
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
589
+ self.config = config
590
+
591
+ def call(
592
+ self,
593
+ hidden_states,
594
+ attention_mask=None,
595
+ head_mask=None,
596
+ encoder_hidden_states=None,
597
+ encoder_attention_mask=None,
598
+ past_key_value=None,
599
+ output_attentions=False,
600
+ training=False,
601
+ ):
602
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
603
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
604
+ self_attention_outputs = self.attention(
605
+ hidden_states,
606
+ attention_mask,
607
+ head_mask,
608
+ output_attentions=output_attentions,
609
+ past_key_value=self_attn_past_key_value,
610
+ training=training,
611
+ )
612
+ attention_output = self_attention_outputs[0]
613
+
614
+ # if decoder, the last output is tuple of self-attn cache
615
+ if self.is_decoder:
616
+ outputs = self_attention_outputs[1:-1]
617
+ present_key_value = self_attention_outputs[-1]
618
+ else:
619
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
620
+
621
+ cross_attn_present_key_value = None
622
+ if self.is_decoder and encoder_hidden_states is not None:
623
+ if not hasattr(self, "crossattention"):
624
+ raise AttributeError(
625
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
626
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
627
+ )
628
+
629
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
630
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
631
+ cross_attention_outputs = self.crossattention(
632
+ attention_output,
633
+ attention_mask,
634
+ head_mask,
635
+ encoder_hidden_states,
636
+ encoder_attention_mask,
637
+ cross_attn_past_key_value,
638
+ output_attentions,
639
+ training=training,
640
+ )
641
+ attention_output = cross_attention_outputs[0]
642
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
643
+
644
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
645
+ cross_attn_present_key_value = cross_attention_outputs[-1]
646
+ present_key_value = present_key_value + cross_attn_present_key_value
647
+
648
+ layernorm_output = self.LayerNorm(attention_output)
649
+ intermediate_output = self.intermediate(hidden_states=layernorm_output)
650
+ layer_output = self.output_layer(
651
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
652
+ )
653
+ outputs = (layer_output,) + outputs # add attentions if we output them
654
+
655
+ # if decoder, return the attn key/values as the last output
656
+ if self.is_decoder:
657
+ outputs = outputs + (present_key_value,)
658
+
659
+ return outputs
660
+
661
+ def build(self, input_shape=None):
662
+ if self.built:
663
+ return
664
+ self.built = True
665
+ if getattr(self, "attention", None) is not None:
666
+ with tf.name_scope(self.attention.name):
667
+ self.attention.build(None)
668
+ if getattr(self, "intermediate", None) is not None:
669
+ with tf.name_scope(self.intermediate.name):
670
+ self.intermediate.build(None)
671
+ if getattr(self, "output_layer", None) is not None:
672
+ with tf.name_scope(self.output_layer.name):
673
+ self.output_layer.build(None)
674
+ if getattr(self, "LayerNorm", None) is not None:
675
+ with tf.name_scope(self.LayerNorm.name):
676
+ self.LayerNorm.build([None, None, self.config.hidden_size])
677
+
678
+
679
+ class TFEsmEncoder(keras.layers.Layer):
680
+ def __init__(self, config, name=None):
681
+ super().__init__(name=name)
682
+ self.config = config
683
+ self.layer = [TFEsmLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
684
+ self.emb_layer_norm_after = keras.layers.LayerNormalization(
685
+ epsilon=config.layer_norm_eps, name="emb_layer_norm_after"
686
+ )
687
+
688
+ def call(
689
+ self,
690
+ hidden_states,
691
+ attention_mask=None,
692
+ head_mask=None,
693
+ encoder_hidden_states=None,
694
+ encoder_attention_mask=None,
695
+ past_key_values=None,
696
+ use_cache=None,
697
+ output_attentions=False,
698
+ output_hidden_states=False,
699
+ return_dict=True,
700
+ training=False,
701
+ ):
702
+ all_hidden_states = () if output_hidden_states else None
703
+ all_self_attentions = () if output_attentions else None
704
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
705
+
706
+ next_decoder_cache = () if use_cache else None
707
+ for i, layer_module in enumerate(self.layer):
708
+ if output_hidden_states:
709
+ all_hidden_states = all_hidden_states + (hidden_states,)
710
+
711
+ layer_head_mask = head_mask[i] if head_mask is not None else None
712
+ past_key_value = past_key_values[i] if past_key_values is not None else None
713
+
714
+ layer_outputs = layer_module(
715
+ hidden_states,
716
+ attention_mask,
717
+ layer_head_mask,
718
+ encoder_hidden_states,
719
+ encoder_attention_mask,
720
+ past_key_value,
721
+ output_attentions,
722
+ training,
723
+ )
724
+
725
+ hidden_states = layer_outputs[0]
726
+ if use_cache:
727
+ next_decoder_cache += (layer_outputs[-1],)
728
+ if output_attentions:
729
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
730
+ if self.config.add_cross_attention:
731
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
732
+
733
+ if self.emb_layer_norm_after:
734
+ hidden_states = self.emb_layer_norm_after(hidden_states)
735
+
736
+ if output_hidden_states:
737
+ all_hidden_states = all_hidden_states + (hidden_states,)
738
+
739
+ if not return_dict:
740
+ return tuple(
741
+ v
742
+ for v in [
743
+ hidden_states,
744
+ next_decoder_cache,
745
+ all_hidden_states,
746
+ all_self_attentions,
747
+ all_cross_attentions,
748
+ ]
749
+ if v is not None
750
+ )
751
+ return TFBaseModelOutputWithPastAndCrossAttentions(
752
+ last_hidden_state=hidden_states,
753
+ past_key_values=next_decoder_cache,
754
+ hidden_states=all_hidden_states,
755
+ attentions=all_self_attentions,
756
+ cross_attentions=all_cross_attentions,
757
+ )
758
+
759
+ def build(self, input_shape=None):
760
+ if self.built:
761
+ return
762
+ self.built = True
763
+ if getattr(self, "emb_layer_norm_after", None) is not None:
764
+ with tf.name_scope(self.emb_layer_norm_after.name):
765
+ self.emb_layer_norm_after.build([None, None, self.config.hidden_size])
766
+ if getattr(self, "layer", None) is not None:
767
+ for layer in self.layer:
768
+ with tf.name_scope(layer.name):
769
+ layer.build(None)
770
+
771
+
772
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Esm
773
+ class TFEsmPooler(keras.layers.Layer):
774
+ def __init__(self, config: EsmConfig, **kwargs):
775
+ super().__init__(**kwargs)
776
+
777
+ self.dense = keras.layers.Dense(
778
+ units=config.hidden_size,
779
+ kernel_initializer=get_initializer(config.initializer_range),
780
+ activation="tanh",
781
+ name="dense",
782
+ )
783
+ self.config = config
784
+
785
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
786
+ # We "pool" the model by simply taking the hidden state corresponding
787
+ # to the first token.
788
+ first_token_tensor = hidden_states[:, 0]
789
+ pooled_output = self.dense(inputs=first_token_tensor)
790
+
791
+ return pooled_output
792
+
793
+ def build(self, input_shape=None):
794
+ if self.built:
795
+ return
796
+ self.built = True
797
+ if getattr(self, "dense", None) is not None:
798
+ with tf.name_scope(self.dense.name):
799
+ self.dense.build([None, None, self.config.hidden_size])
800
+
801
+
802
+ class TFEsmPreTrainedModel(TFPreTrainedModel):
803
+ """
804
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
805
+ models.
806
+ """
807
+
808
+ config_class = EsmConfig
809
+ base_model_prefix = "esm"
810
+
811
+
812
+ ESM_START_DOCSTRING = r"""
813
+
814
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
815
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
816
+ etc.)
817
+
818
+ This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a
819
+ regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior.
820
+
821
+ Parameters:
822
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
823
+ model. Initializing with a config file does not load the weights associated with the model, only the
824
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
825
+ """
826
+
827
+ ESM_INPUTS_DOCSTRING = r"""
828
+ Args:
829
+ input_ids (`tf.Tensor` of shape `({0})`):
830
+ Indices of input sequence tokens in the vocabulary.
831
+
832
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
833
+ [`PreTrainedTokenizer.__call__`] for details.
834
+
835
+ [What are input IDs?](../glossary#input-ids)
836
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
837
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
838
+
839
+ - 1 for tokens that are **not masked**,
840
+ - 0 for tokens that are **masked**.
841
+
842
+ [What are attention masks?](../glossary#attention-mask)
843
+ position_ids (`tf.Tensor` of shape `({0})`, *optional*):
844
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
845
+ config.max_position_embeddings - 1]`.
846
+
847
+ [What are position IDs?](../glossary#position-ids)
848
+ head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
849
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
850
+
851
+ - 1 indicates the head is **not masked**,
852
+ - 0 indicates the head is **masked**.
853
+
854
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
855
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
856
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
857
+ model's internal embedding lookup matrix.
858
+ output_attentions (`bool`, *optional*):
859
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
860
+ tensors for more detail.
861
+ output_hidden_states (`bool`, *optional*):
862
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
863
+ more detail.
864
+ return_dict (`bool`, *optional*):
865
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
866
+ """
867
+
868
+
869
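# A minimal sketch of how the tensors documented above are usually produced; the checkpoint
# name below is an assumption used purely for illustration:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     batch = tokenizer(["MKTAYIAKQR", "MKT"], padding=True, return_tensors="tf")
#     batch["input_ids"]       # (batch_size, sequence_length) token indices, padded
#     batch["attention_mask"]  # same shape, 1 for real tokens and 0 for padding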
+ @add_start_docstrings(
870
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
871
+ ESM_START_DOCSTRING,
872
+ )
873
+ class TFEsmMainLayer(keras.layers.Layer):
874
+ """
875
+
876
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
877
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
878
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
879
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
880
+
881
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
882
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
883
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
884
+ """
885
+
886
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
887
+
888
+ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs):
889
+ super().__init__(name=name, **kwargs)
890
+
891
+ self.config = config
892
+ self.is_decoder = config.is_decoder
893
+
894
+ self.embeddings = TFEsmEmbeddings(config, name="embeddings")
895
+ self.encoder = TFEsmEncoder(config, name="encoder")
896
+ self.pooler = TFEsmPooler(config, name="pooler") if add_pooling_layer else None
897
+
898
+ self.contact_head = TFEsmContactPredictionHead(
899
+ in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name="contact_head"
900
+ )
901
+
902
+ def build(self, input_shape=None):
903
+ if self.built:
904
+ return
905
+ self.built = True
906
+ if getattr(self, "embeddings", None) is not None:
907
+ with tf.name_scope(self.embeddings.name):
908
+ self.embeddings.build(None)
909
+ if getattr(self, "encoder", None) is not None:
910
+ with tf.name_scope(self.encoder.name):
911
+ self.encoder.build(None)
912
+ if getattr(self, "pooler", None) is not None:
913
+ with tf.name_scope(self.pooler.name):
914
+ self.pooler.build(None)
915
+ if getattr(self, "contact_head", None) is not None:
916
+ with tf.name_scope(self.contact_head.name):
917
+ self.contact_head.build(None)
918
+
919
+ def get_input_embeddings(self):
920
+ return self.embeddings.word_embeddings
921
+
922
+ def set_input_embeddings(self, value: tf.Variable):
923
+ self.embeddings.word_embeddings.weight = value
924
+ self.embeddings.vocab_size = shape_list(value)[0]
925
+
926
+ def _prune_heads(self, heads_to_prune):
927
+ raise NotImplementedError
928
+
929
+ def call(
930
+ self,
931
+ input_ids: TFModelInputType | None = None,
932
+ attention_mask: np.ndarray | tf.Tensor | None = None,
933
+ position_ids: np.ndarray | tf.Tensor | None = None,
934
+ head_mask: np.ndarray | tf.Tensor | None = None,
935
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
936
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
937
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
938
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
939
+ use_cache: Optional[bool] = None,
940
+ output_attentions: Optional[bool] = None,
941
+ output_hidden_states: Optional[bool] = None,
942
+ return_dict: Optional[bool] = None,
943
+ training: bool = False,
944
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
945
+ if not self.config.is_decoder:
946
+ use_cache = False
947
+
948
+ if input_ids is not None and inputs_embeds is not None:
949
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
950
+ elif input_ids is not None:
951
+ input_shape = shape_list(input_ids)
952
+ elif inputs_embeds is not None:
953
+ input_shape = shape_list(inputs_embeds)[:-1]
954
+ else:
955
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
956
+
957
+ batch_size, seq_length = input_shape
958
+
959
+ if past_key_values is None:
960
+ past_key_values_length = 0
961
+ past_key_values = [None] * len(self.encoder.layer)
962
+ else:
963
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
964
+
965
+ if attention_mask is None:
966
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
967
+
968
+ embedding_output = self.embeddings(
969
+ input_ids=input_ids,
970
+ attention_mask=attention_mask,
971
+ position_ids=position_ids,
972
+ inputs_embeds=inputs_embeds,
973
+ past_key_values_length=past_key_values_length,
974
+ training=training,
975
+ )
976
+
977
+ # We create a 3D attention mask from a 2D tensor mask.
978
+ # Sizes are [batch_size, 1, 1, to_seq_length]
979
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
980
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
982
+ attention_mask_shape = shape_list(attention_mask)
983
+
984
+ mask_seq_length = seq_length + past_key_values_length
985
+ # Copied from `modeling_tf_t5.py`
986
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
987
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
988
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
989
+ if self.is_decoder:
990
+ seq_ids = tf.range(mask_seq_length)
991
+ causal_mask = tf.less_equal(
992
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
993
+ seq_ids[None, :, None],
994
+ )
995
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
996
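# For mask_seq_length == 3 the causal mask per batch element is the lower-triangular
# pattern [[1, 0, 0], [1, 1, 0], [1, 1, 1]]: each position can attend to itself and to
# earlier positions only.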
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
997
+ attention_mask_shape = shape_list(extended_attention_mask)
998
+ extended_attention_mask = tf.reshape(
999
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
1000
+ )
1001
+ if past_key_values[0] is not None:
1002
+ # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]
1003
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
1004
+ else:
1005
+ extended_attention_mask = tf.reshape(
1006
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
1007
+ )
1008
+
1009
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1010
+ # masked positions, this operation will create a tensor which is 0.0 for
1011
+ # positions we want to attend and -10000.0 for masked positions.
1012
+ # Since we are adding it to the raw scores before the softmax, this is
1013
+ # effectively the same as removing these entirely.
1014
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
1015
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
1016
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
1017
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
1018
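# e.g. an attention_mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0] (broadcast over the
# head and query dimensions), so padded keys receive effectively zero softmax weight.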
+
1019
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
1020
+ if self.is_decoder and encoder_attention_mask is not None:
1021
+ # If a 2D or 3D attention mask is provided for the cross-attention
1022
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
1024
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
1025
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
1026
+ if num_dims_encoder_attention_mask == 3:
1027
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
1028
+ if num_dims_encoder_attention_mask == 2:
1029
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
1030
+
1031
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
1032
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
1033
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
1034
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
1035
+
1036
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
1037
+ else:
1038
+ encoder_extended_attention_mask = None
1039
+
1040
+ # Prepare head mask if needed
1041
+ # 1.0 in head_mask indicate we keep the head
1042
+ # attention_probs has shape bsz x n_heads x N x N
1043
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1044
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1045
+ if head_mask is not None:
1046
+ raise NotImplementedError
1047
+ else:
1048
+ head_mask = [None] * self.config.num_hidden_layers
1049
+
1050
+ encoder_outputs = self.encoder(
1051
+ hidden_states=embedding_output,
1052
+ attention_mask=extended_attention_mask,
1053
+ head_mask=head_mask,
1054
+ encoder_hidden_states=encoder_hidden_states,
1055
+ encoder_attention_mask=encoder_extended_attention_mask,
1056
+ past_key_values=past_key_values,
1057
+ use_cache=use_cache,
1058
+ output_attentions=output_attentions,
1059
+ output_hidden_states=output_hidden_states,
1060
+ return_dict=return_dict,
1061
+ training=training,
1062
+ )
1063
+
1064
+ sequence_output = encoder_outputs[0]
1065
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
1066
+
1067
+ if not return_dict:
1068
+ return (
1069
+ sequence_output,
1070
+ pooled_output,
1071
+ ) + encoder_outputs[1:]
1072
+
1073
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
1074
+ last_hidden_state=sequence_output,
1075
+ pooler_output=pooled_output,
1076
+ past_key_values=encoder_outputs.past_key_values,
1077
+ hidden_states=encoder_outputs.hidden_states,
1078
+ attentions=encoder_outputs.attentions,
1079
+ cross_attentions=encoder_outputs.cross_attentions,
1080
+ )
1081
+
1082
+ def predict_contacts(self, tokens, attention_mask):
1083
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
1084
+ attns = tf.stack(attns, axis=1) # Matches the original model layout
1085
+ # In the original model, attentions for padding tokens are completely zeroed out.
1086
+ # This makes no difference most of the time because the other tokens won't attend to them,
1087
+ # but it does for the contact prediction task, which takes attentions as input,
1088
+ # so we have to mimic that here.
1089
+ attention_mask = tf.cast(attention_mask, attns.dtype)
1090
+ attns *= attention_mask[:, None, None, None]
1091
+ attns *= attention_mask[:, None, None, :, None]
1092
+ return self.contact_head(tokens, attns)
1093
+
1094
+
1095
+ @add_start_docstrings(
1096
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
1097
+ ESM_START_DOCSTRING,
1098
+ )
1099
+ class TFEsmModel(TFEsmPreTrainedModel):
1100
+ def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs):
1101
+ super().__init__(config, *inputs, **kwargs)
1102
+
1103
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name="esm")
1104
+
1105
+ @unpack_inputs
1106
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1107
+ @add_code_sample_docstrings(
1108
+ checkpoint=_CHECKPOINT_FOR_DOC,
1109
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
1110
+ config_class=_CONFIG_FOR_DOC,
1111
+ )
1112
+ def call(
1113
+ self,
1114
+ input_ids: TFModelInputType | None = None,
1115
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1116
+ position_ids: np.ndarray | tf.Tensor | None = None,
1117
+ head_mask: np.ndarray | tf.Tensor | None = None,
1118
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1119
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1120
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1121
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1122
+ use_cache: Optional[bool] = None,
1123
+ output_attentions: Optional[bool] = None,
1124
+ output_hidden_states: Optional[bool] = None,
1125
+ return_dict: Optional[bool] = None,
1126
+ training: Optional[bool] = False,
1127
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
1128
+ r"""
1129
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1130
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
1131
+ the model is configured as a decoder.
1132
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1133
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
1134
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
1135
+
1136
+ - 1 for tokens that are **not masked**,
1137
+ - 0 for tokens that are **masked**.
1138
+
1139
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
1140
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
1141
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
1142
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
1143
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
1144
+ use_cache (`bool`, *optional*, defaults to `True`):
1145
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1146
+ `past_key_values`). Set to `False` during training, `True` during generation
1147
+ """
1148
+ outputs = self.esm(
1149
+ input_ids=input_ids,
1150
+ attention_mask=attention_mask,
1151
+ position_ids=position_ids,
1152
+ head_mask=head_mask,
1153
+ inputs_embeds=inputs_embeds,
1154
+ encoder_hidden_states=encoder_hidden_states,
1155
+ encoder_attention_mask=encoder_attention_mask,
1156
+ past_key_values=past_key_values,
1157
+ use_cache=use_cache,
1158
+ output_attentions=output_attentions,
1159
+ output_hidden_states=output_hidden_states,
1160
+ return_dict=return_dict,
1161
+ training=training,
1162
+ )
1163
+ return outputs
1164
+
1165
+ def predict_contacts(self, tokens, attention_mask):
1166
+ return self.esm.predict_contacts(tokens, attention_mask)
1167
+
1168
+ def build(self, input_shape=None):
1169
+ if self.built:
1170
+ return
1171
+ self.built = True
1172
+ if getattr(self, "esm", None) is not None:
1173
+ with tf.name_scope(self.esm.name):
1174
+ self.esm.build(None)
1175
+
1176
+
1177
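# A hedged usage sketch for TFEsmModel above; the checkpoint name is an assumption chosen
# for illustration (any TF-compatible ESM checkpoint would do):
#
#     from transformers import AutoTokenizer, TFEsmModel
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")
#     outputs = model(**inputs)
#     outputs.last_hidden_state.shape    # (batch_size, sequence_length, hidden_size)
#     # Contact prediction reuses the stacked attention maps from the main layer:
#     contacts = model.predict_contacts(inputs["input_ids"], inputs["attention_mask"])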
+ @add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
1178
+ class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss):
1179
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1180
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1181
+
1182
+ def __init__(self, config):
1183
+ super().__init__(config)
1184
+
1185
+ if config.is_decoder:
1186
+ logger.warning(
1187
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
1188
+ "bi-directional self-attention."
1189
+ )
1190
+
1191
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1192
+ self.lm_head = TFEsmLMHead(config, name="lm_head")
1193
+ if config.tie_word_embeddings:
1194
+ # Ensure word embeddings are built so that we actually have something to tie
1195
+ with tf.name_scope(os.path.join(self._name_scope(), "esm", "embeddings", "word_embeddings")):
1196
+ self.esm.embeddings.word_embeddings.build((None, None))
1197
+ self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0]
1198
+
1199
+ def get_output_embeddings(self):
1200
+ return self.lm_head.decoder
1201
+
1202
+ def set_output_embeddings(self, new_embeddings):
1203
+ self.lm_head.decoder = new_embeddings
1204
+
1205
+ def get_lm_head(self):
1206
+ return self.lm_head
1207
+
1208
+ @unpack_inputs
1209
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1210
+ @add_code_sample_docstrings(
1211
+ checkpoint=_CHECKPOINT_FOR_DOC,
1212
+ output_type=TFMaskedLMOutput,
1213
+ config_class=_CONFIG_FOR_DOC,
1214
+ mask="<mask>",
1215
+ )
1216
+ def call(
1217
+ self,
1218
+ input_ids: TFModelInputType | None = None,
1219
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1220
+ position_ids: np.ndarray | tf.Tensor | None = None,
1221
+ head_mask: np.ndarray | tf.Tensor | None = None,
1222
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1223
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
1224
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
1225
+ labels: np.ndarray | tf.Tensor | None = None,
1226
+ output_attentions: Optional[bool] = None,
1227
+ output_hidden_states: Optional[bool] = None,
1228
+ return_dict: Optional[bool] = None,
1229
+ training: bool = False,
1230
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
1231
+ r"""
1232
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1233
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1234
+ config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1235
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1236
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
1237
+ Used to hide legacy arguments that have been deprecated.
1238
+ """
1239
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1240
+
1241
+ outputs = self.esm(
1242
+ input_ids,
1243
+ attention_mask=attention_mask,
1244
+ position_ids=position_ids,
1245
+ head_mask=head_mask,
1246
+ inputs_embeds=inputs_embeds,
1247
+ encoder_hidden_states=encoder_hidden_states,
1248
+ encoder_attention_mask=encoder_attention_mask,
1249
+ output_attentions=output_attentions,
1250
+ output_hidden_states=output_hidden_states,
1251
+ return_dict=return_dict,
1252
+ training=training,
1253
+ )
1254
+ sequence_output = outputs[0]
1255
+ prediction_scores = self.lm_head(sequence_output)
1256
+
1257
+ masked_lm_loss = None
1258
+ if labels is not None:
1259
+ masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores)
1260
+
1261
+ if not return_dict:
1262
+ output = (prediction_scores,) + outputs[2:]
1263
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1264
+
1265
+ return TFMaskedLMOutput(
1266
+ loss=masked_lm_loss,
1267
+ logits=prediction_scores,
1268
+ hidden_states=outputs.hidden_states,
1269
+ attentions=outputs.attentions,
1270
+ )
1271
+
1272
+ def predict_contacts(self, tokens, attention_mask):
1273
+ return self.esm.predict_contacts(tokens, attention_mask)
1274
+
1275
+ def build(self, input_shape=None):
1276
+ if self.built:
1277
+ return
1278
+ self.built = True
1279
+ if getattr(self, "esm", None) is not None:
1280
+ with tf.name_scope(self.esm.name):
1281
+ self.esm.build(None)
1282
+ if getattr(self, "lm_head", None) is not None:
1283
+ with tf.name_scope(self.lm_head.name):
1284
+ self.lm_head.build(None)
1285
+
1286
+
1287
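# A hedged sketch of TFEsmForMaskedLM in use (the checkpoint name is an illustrative
# assumption; ESM tokenizers use "<mask>" as the mask token, as in the docstring decorator above):
#
#     import tensorflow as tf
#     from transformers import AutoTokenizer, TFEsmForMaskedLM
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     inputs = tokenizer("MKTAYIAK<mask>RQISFVK", return_tensors="tf")
#     logits = model(**inputs).logits                                   # (1, seq_len, vocab_size)
#     mask_pos = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1]
#     predicted = tokenizer.decode([int(tf.argmax(logits[0, mask_pos]))])  # most likely residue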
+ class TFEsmLMHead(keras.layers.Layer):
1288
+ """ESM Head for masked language modeling."""
1289
+
1290
+ def __init__(self, config, name=None):
1291
+ super().__init__(name=name)
1292
+ self.dense = keras.layers.Dense(
1293
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
1294
+ )
1295
+
1296
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
1297
+ if config.tie_word_embeddings:
1298
+ self.decoder = None
1299
+ else:
1300
+ self.decoder = keras.layers.Dense(
1301
+ config.vocab_size,
1302
+ kernel_initializer=get_initializer(config.initializer_range),
1303
+ name="decoder",
1304
+ use_bias=False,
1305
+ )
1306
+ self.config = config
1307
+
1308
+ def build(self, input_shape=None):
1309
+ # Separate bias to match the PT model and allow weight cross-loading to work
1310
+ # Put it in the build so it gets the right name when adding it as a weight
1311
+ if self.built:
1312
+ return
1313
+ self.built = True
1314
+ self.bias = self.add_weight("bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True)
1315
+ if getattr(self, "dense", None) is not None:
1316
+ with tf.name_scope(self.dense.name):
1317
+ self.dense.build([None, None, self.config.hidden_size])
1318
+ if getattr(self, "layer_norm", None) is not None:
1319
+ with tf.name_scope(self.layer_norm.name):
1320
+ self.layer_norm.build([None, None, self.config.hidden_size])
1321
+ if getattr(self, "decoder", None) is not None and not self.config.tie_word_embeddings:
1322
+ with tf.name_scope(self.decoder.name):
1323
+ self.decoder.build([None, None, self.config.hidden_size])
1324
+
1325
+ def get_bias(self):
1326
+ return {"bias": self.bias}
1327
+
1328
+ def call(self, features):
1329
+ x = self.dense(features)
1330
+ x = tf.nn.gelu(x)
1331
+ x = self.layer_norm(x)
1332
+
1333
+ # project back to size of vocabulary with bias
1334
+ if self.config.tie_word_embeddings:
1335
+ x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias
1336
+ else:
1337
+ x = self.decoder(x) + self.bias
1338
+ return x
1339
+
1340
+
1341
+ @add_start_docstrings(
1342
+ """
1343
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
1344
+ output) e.g. for GLUE tasks.
1345
+ """,
1346
+ ESM_START_DOCSTRING,
1347
+ )
1348
+ class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss):
1349
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1350
+
1351
+ def __init__(self, config):
1352
+ super().__init__(config)
1353
+ self.num_labels = config.num_labels
1354
+ self.config = config
1355
+
1356
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1357
+ self.classifier = TFEsmClassificationHead(config, name="classifier")
1358
+
1359
+ @unpack_inputs
1360
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1361
+ @add_code_sample_docstrings(
1362
+ checkpoint=_CHECKPOINT_FOR_DOC,
1363
+ output_type=TFSequenceClassifierOutput,
1364
+ config_class=_CONFIG_FOR_DOC,
1365
+ )
1366
+ def call(
1367
+ self,
1368
+ input_ids: TFModelInputType | None = None,
1369
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1370
+ position_ids: np.ndarray | tf.Tensor | None = None,
1371
+ head_mask: np.ndarray | tf.Tensor | None = None,
1372
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1373
+ labels: np.ndarray | tf.Tensor | None = None,
1374
+ output_attentions: Optional[bool] = None,
1375
+ output_hidden_states: Optional[bool] = None,
1376
+ return_dict: Optional[bool] = None,
1377
+ training: bool = False,
1378
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
1379
+ r"""
1380
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1381
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1382
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1383
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1384
+ """
1385
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1386
+
1387
+ outputs = self.esm(
1388
+ input_ids,
1389
+ attention_mask=attention_mask,
1390
+ position_ids=position_ids,
1391
+ head_mask=head_mask,
1392
+ inputs_embeds=inputs_embeds,
1393
+ output_attentions=output_attentions,
1394
+ output_hidden_states=output_hidden_states,
1395
+ return_dict=return_dict,
1396
+ training=training,
1397
+ )
1398
+ sequence_output = outputs[0]
1399
+ logits = self.classifier(sequence_output)
1400
+
1401
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1402
+
1403
+ if not return_dict:
1404
+ output = (logits,) + outputs[2:]
1405
+ return ((loss,) + output) if loss is not None else output
1406
+
1407
+ return TFSequenceClassifierOutput(
1408
+ loss=loss,
1409
+ logits=logits,
1410
+ hidden_states=outputs.hidden_states,
1411
+ attentions=outputs.attentions,
1412
+ )
1413
+
1414
+ def build(self, input_shape=None):
1415
+ if self.built:
1416
+ return
1417
+ self.built = True
1418
+ if getattr(self, "esm", None) is not None:
1419
+ with tf.name_scope(self.esm.name):
1420
+ self.esm.build(None)
1421
+ if getattr(self, "classifier", None) is not None:
1422
+ with tf.name_scope(self.classifier.name):
1423
+ self.classifier.build(None)
1424
+
1425
+
1426
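# A brief sketch for the sequence-classification head above; `num_labels` and the checkpoint
# are assumptions for illustration:
#
#     from transformers import TFEsmForSequenceClassification
#     model = TFEsmForSequenceClassification.from_pretrained(
#         "facebook/esm2_t6_8M_UR50D", num_labels=2
#     )
#     logits = model(**inputs).logits   # (batch_size, num_labels), `inputs` from the tokenizer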
+ @add_start_docstrings(
1427
+ """
1428
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1429
+ Named-Entity-Recognition (NER) tasks.
1430
+ """,
1431
+ ESM_START_DOCSTRING,
1432
+ )
1433
+ class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss):
1434
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1435
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1436
+
1437
+ def __init__(self, config):
1438
+ super().__init__(config)
1439
+ self.num_labels = config.num_labels
1440
+
1441
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
1442
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1443
+ self.classifier = keras.layers.Dense(config.num_labels, name="classifier")
1444
+ self.config = config
1445
+
1446
+ @unpack_inputs
1447
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1448
+ @add_code_sample_docstrings(
1449
+ checkpoint=_CHECKPOINT_FOR_DOC,
1450
+ output_type=TFTokenClassifierOutput,
1451
+ config_class=_CONFIG_FOR_DOC,
1452
+ )
1453
+ def call(
1454
+ self,
1455
+ input_ids: TFModelInputType | None = None,
1456
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1457
+ position_ids: np.ndarray | tf.Tensor | None = None,
1458
+ head_mask: np.ndarray | tf.Tensor | None = None,
1459
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1460
+ labels: np.ndarray | tf.Tensor | None = None,
1461
+ output_attentions: Optional[bool] = None,
1462
+ output_hidden_states: Optional[bool] = None,
1463
+ return_dict: Optional[bool] = None,
1464
+ training: bool = False,
1465
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
1466
+ r"""
1467
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1468
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1469
+ """
1470
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1471
+
1472
+ outputs = self.esm(
1473
+ input_ids,
1474
+ attention_mask=attention_mask,
1475
+ position_ids=position_ids,
1476
+ head_mask=head_mask,
1477
+ inputs_embeds=inputs_embeds,
1478
+ output_attentions=output_attentions,
1479
+ output_hidden_states=output_hidden_states,
1480
+ return_dict=return_dict,
1481
+ training=training,
1482
+ )
1483
+
1484
+ sequence_output = outputs[0]
1485
+
1486
+ sequence_output = self.dropout(sequence_output, training=training)
1487
+ logits = self.classifier(sequence_output)
1488
+
1489
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
1490
+
1491
+ if not return_dict:
1492
+ output = (logits,) + outputs[2:]
1493
+ return ((loss,) + output) if loss is not None else output
1494
+
1495
+ return TFTokenClassifierOutput(
1496
+ loss=loss,
1497
+ logits=logits,
1498
+ hidden_states=outputs.hidden_states,
1499
+ attentions=outputs.attentions,
1500
+ )
1501
+
1502
+ def build(self, input_shape=None):
1503
+ if self.built:
1504
+ return
1505
+ self.built = True
1506
+ if getattr(self, "esm", None) is not None:
1507
+ with tf.name_scope(self.esm.name):
1508
+ self.esm.build(None)
1509
+ if getattr(self, "classifier", None) is not None:
1510
+ with tf.name_scope(self.classifier.name):
1511
+ self.classifier.build([None, None, self.config.hidden_size])
1512
+
1513
+
1514
+ class TFEsmClassificationHead(keras.layers.Layer):
1515
+ """Head for sentence-level classification tasks."""
1516
+
1517
+ def __init__(self, config, name=None):
1518
+ super().__init__(name=name)
1519
+ self.dense = keras.layers.Dense(
1520
+ config.hidden_size,
1521
+ kernel_initializer=get_initializer(config.initializer_range),
1522
+ activation="tanh",
1523
+ name="dense",
1524
+ )
1525
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
1526
+ self.out_proj = keras.layers.Dense(
1527
+ config.num_labels,
1528
+ kernel_initializer=get_initializer(config.initializer_range),
1529
+ activation="linear",
1530
+ name="out_proj",
1531
+ )
1532
+ self.config = config
1533
+
1534
+ def call(self, features, training=False):
1535
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1536
+ x = self.dropout(x, training=training)
1537
+ x = self.dense(x)
1538
+ x = self.dropout(x, training=training)
1539
+ x = self.out_proj(x)
1540
+ return x
1541
+
1542
+ def build(self, input_shape=None):
1543
+ if self.built:
1544
+ return
1545
+ self.built = True
1546
+ if getattr(self, "dense", None) is not None:
1547
+ with tf.name_scope(self.dense.name):
1548
+ self.dense.build([None, None, self.config.hidden_size])
1549
+ if getattr(self, "out_proj", None) is not None:
1550
+ with tf.name_scope(self.out_proj.name):
1551
+ self.out_proj.build([None, None, self.config.hidden_size])
1552
+
1553
+
1554
+ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
1555
+ """
1556
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
1557
+ are ignored. This is modified from fairseq's `utils.make_positions`.
1558
+
1559
+ Args:
1560
+ input_ids: tf.Tensor
+ padding_idx: int
+ past_key_values_length: int
1561
+
1562
+ Returns: tf.Tensor
1563
+ """
1564
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
1565
+ mask = tf.cast(input_ids != padding_idx, tf.int64)
1566
+ incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
1567
+ return incremental_indices + padding_idx
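# Worked example with padding_idx = 1 (values chosen for illustration):
#   input_ids           = [[0, 5, 6, 1, 1]]
#   mask                = [[1, 1, 1, 0, 0]]
#   cumsum(mask) * mask = [[1, 2, 3, 0, 0]]
#   returned positions  = [[2, 3, 4, 1, 1]]   # padding positions stay at padding_idx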
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ from .chunk_utils import chunk_layer
2
+ from .data_transforms import make_atom14_masks
3
+ from .feats import atom14_to_atom37, frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames
4
+ from .loss import compute_predicted_aligned_error, compute_tm
5
+ from .protein import Protein as OFProtein
6
+ from .protein import to_pdb
7
+ from .rigid_utils import Rigid, Rotation
8
+ from .tensor_utils import dict_multimap, flatten_final_dims, permute_final_dims
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (775 Bytes).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc ADDED
Binary file (12.7 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc ADDED
Binary file (2.86 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc ADDED
Binary file (6.22 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc ADDED
Binary file (2.83 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc ADDED
Binary file (7.97 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc ADDED
Binary file (24.3 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc ADDED
Binary file (38.7 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc ADDED
Binary file (4.96 kB).
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/chunk_utils.py ADDED
@@ -0,0 +1,397 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import logging
15
+ import math
16
+ from functools import partial
17
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
18
+
19
+ import torch
20
+
21
+ from .tensor_utils import tensor_tree_map, tree_map
22
+
23
+
24
+ def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
25
+ shapes = []
26
+ if isinstance(tree, dict):
27
+ for v in tree.values():
28
+ shapes.extend(_fetch_dims(v))
29
+ elif isinstance(tree, (list, tuple)):
30
+ for t in tree:
31
+ shapes.extend(_fetch_dims(t))
32
+ elif isinstance(tree, torch.Tensor):
33
+ shapes.append(tree.shape)
34
+ else:
35
+ raise ValueError("Not supported")
36
+
37
+ return shapes
38
+
39
+
40
+ @torch.jit.ignore
41
+ def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
42
+ idx = []
43
+ for d in reversed(dims):
44
+ idx.append(flat_idx % d)
45
+ flat_idx = flat_idx // d
46
+
47
+ return tuple(reversed(idx))
48
+
49
+
50
+ @torch.jit.ignore
51
+ def _get_minimal_slice_set(
52
+ start: Sequence[int],
53
+ end: Sequence[int],
54
+ dims: Sequence[int],
55
+ start_edges: Optional[Sequence[bool]] = None,
56
+ end_edges: Optional[Sequence[bool]] = None,
57
+ ) -> List[Tuple[slice, ...]]:
58
+ """
59
+ Produces an ordered sequence of tensor slices that, when used in sequence on a tensor with shape dims, yields
60
+ tensors that contain every leaf in the contiguous range [start, end]. Care is taken to yield a short sequence of
61
+ slices, and perhaps even the shortest possible (I'm pretty sure it's the latter).
62
+
63
+ end is INCLUSIVE.
64
+ """
65
+
66
+ # start_edges and end_edges both indicate whether, starting from any given
67
+ # dimension, the start/end index is at the top/bottom edge of the
68
+ # corresponding tensor, modeled as a tree
69
+ def reduce_edge_list(l: List[bool]) -> None:
70
+ tally = True
71
+ for i in range(len(l)):
72
+ reversed_idx = -1 * (i + 1)
73
+ l[reversed_idx] &= tally
74
+ tally = l[reversed_idx]
75
+
76
+ if start_edges is None:
77
+ start_edges = [s == 0 for s in start]
78
+ reduce_edge_list(start_edges)
79
+ if end_edges is None:
80
+ end_edges = [e == (d - 1) for e, d in zip(end, dims)]
81
+ reduce_edge_list(end_edges)
82
+
83
+ # Base cases. Either start/end are empty and we're done, or the final,
84
+ # one-dimensional tensor can be simply sliced
85
+ if len(start) == 0:
86
+ return [()]
87
+ elif len(start) == 1:
88
+ return [(slice(start[0], end[0] + 1),)]
89
+
90
+ slices: List[Tuple[slice, ...]] = []
91
+ path_list: List[slice] = []
92
+
93
+ # Dimensions common to start and end can be selected directly
94
+ for s, e in zip(start, end):
95
+ if s == e:
96
+ path_list.append(slice(s, s + 1))
97
+ else:
98
+ break
99
+
100
+ path: Tuple[slice, ...] = tuple(path_list)
101
+ divergence_idx = len(path)
102
+
103
+ # start == end, and we're done
104
+ if divergence_idx == len(dims):
105
+ return [path]
106
+
107
+ def upper() -> Tuple[Tuple[slice, ...], ...]:
108
+ assert start_edges is not None
109
+ assert end_edges is not None
110
+
111
+ sdi = start[divergence_idx]
112
+ return tuple(
113
+ path + (slice(sdi, sdi + 1),) + s
114
+ for s in _get_minimal_slice_set(
115
+ start[divergence_idx + 1 :],
116
+ [d - 1 for d in dims[divergence_idx + 1 :]],
117
+ dims[divergence_idx + 1 :],
118
+ start_edges=start_edges[divergence_idx + 1 :],
119
+ end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
120
+ )
121
+ )
122
+
123
+ def lower() -> Tuple[Tuple[slice, ...], ...]:
124
+ assert start_edges is not None
125
+ assert end_edges is not None
126
+
127
+ edi = end[divergence_idx]
128
+ return tuple(
129
+ path + (slice(edi, edi + 1),) + s
130
+ for s in _get_minimal_slice_set(
131
+ [0 for _ in start[divergence_idx + 1 :]],
132
+ end[divergence_idx + 1 :],
133
+ dims[divergence_idx + 1 :],
134
+ start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
135
+ end_edges=end_edges[divergence_idx + 1 :],
136
+ )
137
+ )
138
+
139
+ # If both start and end are at the edges of the subtree rooted at
140
+ # divergence_idx, we can just select the whole subtree at once
141
+ if start_edges[divergence_idx] and end_edges[divergence_idx]:
142
+ slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
143
+ # If just start is at the edge, we can grab almost all of the subtree,
144
+ # treating only the ragged bottom edge as an edge case
145
+ elif start_edges[divergence_idx]:
146
+ slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
147
+ slices.extend(lower())
148
+ # Analogous to the previous case, but the top is ragged this time
149
+ elif end_edges[divergence_idx]:
150
+ slices.extend(upper())
151
+ slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
152
+ # If both sides of the range are ragged, we need to handle both sides
153
+ # separately. If there's contiguous meat in between them, we can index it
154
+ # in one big chunk
155
+ else:
156
+ slices.extend(upper())
157
+ middle_ground = end[divergence_idx] - start[divergence_idx]
158
+ if middle_ground > 1:
159
+ slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
160
+ slices.extend(lower())
161
+
162
+ return slices
163
+
164
+
165
+ @torch.jit.ignore
166
+ def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
167
+ """
168
+ Equivalent to
169
+
170
+ t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]
171
+
172
+ but without the need for the initial reshape call, which can be memory-intensive in certain situations. The only
173
+ reshape operations in this function are performed on sub-tensors that scale with (flat_end - flat_start), the chunk
174
+ size.
175
+ """
176
+
177
+ batch_dims = t.shape[:no_batch_dims]
178
+ start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
179
+ # _get_minimal_slice_set is inclusive
180
+ end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
181
+
182
+ # Get an ordered list of slices to perform
183
+ slices = _get_minimal_slice_set(
184
+ start_idx,
185
+ end_idx,
186
+ batch_dims,
187
+ )
188
+
189
+ sliced_tensors = [t[s] for s in slices]
190
+
191
+ return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
192
+
193
+
194
+ def chunk_layer(
195
+ layer: Callable,
196
+ inputs: Dict[str, Any],
197
+ chunk_size: int,
198
+ no_batch_dims: int,
199
+ low_mem: bool = False,
200
+ _out: Any = None,
201
+ _add_into_out: bool = False,
202
+ ) -> Any:
203
+ """
204
+ Implements the "chunking" procedure described in section 1.11.8.
205
+
206
+ Layer outputs and inputs are assumed to be simple "pytrees," consisting only of (arbitrarily nested) lists, tuples,
207
+ and dicts with torch.Tensor leaves.
208
+
209
+ Args:
210
+ layer:
211
+ The layer to be applied chunk-wise
212
+ inputs:
213
+ A (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch
214
+ dimensions.
215
+ chunk_size:
216
+ The number of sub-batches per chunk. If multiple batch dimensions are specified, a "sub-batch" is defined
217
+ as a single indexing of all batch dimensions simultaneously (s.t. the number of sub-batches is the product
218
+ of the batch dimensions).
219
+ no_batch_dims:
220
+ How many of the initial dimensions of each input tensor can be considered batch dimensions.
221
+ low_mem:
222
+ Avoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly
223
+ slower than the default setting.
224
+ Returns:
225
+ The reassembled output of the layer on the inputs.
226
+ """
227
+ if not (len(inputs) > 0):
228
+ raise ValueError("Must provide at least one input")
229
+
230
+ initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
231
+ orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
232
+
233
+ def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
234
+ if not low_mem:
235
+ if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
236
+ t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
237
+ t = t.reshape(-1, *t.shape[no_batch_dims:])
238
+ else:
239
+ t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
240
+ return t
241
+
242
+ prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
243
+ prepped_outputs = None
244
+ if _out is not None:
245
+ prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
246
+
247
+ flat_batch_dim = 1
248
+ for d in orig_batch_dims:
249
+ flat_batch_dim *= d
250
+
251
+ no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
252
+
253
+ def _select_chunk(t: torch.Tensor) -> torch.Tensor:
254
+ return t[i : i + chunk_size] if t.shape[0] != 1 else t
255
+
256
+ i = 0
257
+ out = prepped_outputs
258
+ for _ in range(no_chunks):
259
+ # Chunk the input
260
+ if not low_mem:
261
+ select_chunk = _select_chunk
262
+ else:
263
+ select_chunk = partial(
264
+ _chunk_slice,
265
+ flat_start=i,
266
+ flat_end=min(flat_batch_dim, i + chunk_size),
267
+ no_batch_dims=len(orig_batch_dims),
268
+ )
269
+
270
+ chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
271
+
272
+ # Run the layer on the chunk
273
+ output_chunk = layer(**chunks)
274
+
275
+ # Allocate space for the output
276
+ if out is None:
277
+ out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
278
+
279
+ # Put the chunk in its pre-allocated space
280
+ if isinstance(output_chunk, dict):
281
+
282
+ def assign(d1: dict, d2: dict) -> None:
283
+ for k, v in d1.items():
284
+ if isinstance(v, dict):
285
+ assign(v, d2[k])
286
+ else:
287
+ if _add_into_out:
288
+ v[i : i + chunk_size] += d2[k]
289
+ else:
290
+ v[i : i + chunk_size] = d2[k]
291
+
292
+ assign(out, output_chunk)
293
+ elif isinstance(output_chunk, tuple):
294
+ for x1, x2 in zip(out, output_chunk):
295
+ if _add_into_out:
296
+ x1[i : i + chunk_size] += x2
297
+ else:
298
+ x1[i : i + chunk_size] = x2
299
+ elif isinstance(output_chunk, torch.Tensor):
300
+ if _add_into_out:
301
+ out[i : i + chunk_size] += output_chunk
302
+ else:
303
+ out[i : i + chunk_size] = output_chunk
304
+ else:
305
+ raise ValueError("Not supported")
306
+
307
+ i += chunk_size
308
+
309
+ out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
310
+
311
+ return out
312
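# A minimal sketch of chunk_layer applied to a toy layer; the layer, names and shapes are
# illustrative assumptions only:
#
#     import torch
#
#     def toy_layer(x, y):
#         return {"sum": x + y}
#
#     inputs = {"x": torch.randn(6, 4, 8), "y": torch.randn(6, 4, 8)}
#     out = chunk_layer(toy_layer, inputs, chunk_size=5, no_batch_dims=2)
#     out["sum"].shape   # torch.Size([6, 4, 8]), same result as calling toy_layer directly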
+
313
+
314
+ class ChunkSizeTuner:
315
+ def __init__(
316
+ self,
317
+ # Heuristically, runtimes for most of the modules in the network
318
+ # plateau earlier than this on all GPUs I've run the model on.
319
+ max_chunk_size: int = 512,
320
+ ):
321
+ self.max_chunk_size = max_chunk_size
322
+ self.cached_chunk_size: Optional[int] = None
323
+ self.cached_arg_data: Optional[tuple] = None
324
+
325
+ def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
326
+ logging.info("Tuning chunk size...")
327
+
328
+ if min_chunk_size >= self.max_chunk_size:
329
+ return min_chunk_size
330
+
331
+ candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
332
+ candidates = [c for c in candidates if c > min_chunk_size]
333
+ candidates = [min_chunk_size] + candidates
334
+ candidates[-1] += 4
335
+
336
+ def test_chunk_size(chunk_size: int) -> bool:
337
+ try:
338
+ with torch.no_grad():
339
+ fn(*args, chunk_size=chunk_size)
340
+ return True
341
+ except RuntimeError:
342
+ return False
343
+
344
+ min_viable_chunk_size_index = 0
345
+ i = len(candidates) - 1
346
+ while i > min_viable_chunk_size_index:
347
+ viable = test_chunk_size(candidates[i])
348
+ if not viable:
349
+ i = (min_viable_chunk_size_index + i) // 2
350
+ else:
351
+ min_viable_chunk_size_index = i
352
+ i = (i + len(candidates) - 1) // 2
353
+
354
+ return candidates[min_viable_chunk_size_index]
355
+
356
+ def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
357
+ consistent = True
358
+ for a1, a2 in zip(ac1, ac2):
359
+ assert type(ac1) == type(ac2)
360
+ if isinstance(ac1, (list, tuple)):
361
+ consistent &= self._compare_arg_caches(a1, a2)
362
+ elif isinstance(ac1, dict):
363
+ a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
364
+ a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
365
+ consistent &= self._compare_arg_caches(a1_items, a2_items)
366
+ else:
367
+ consistent &= a1 == a2
368
+
369
+ return consistent
370
+
371
+ def tune_chunk_size(
372
+ self,
373
+ representative_fn: Callable,
374
+ args: tuple,
375
+ min_chunk_size: int,
376
+ ) -> int:
377
+ consistent = True
378
+ arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
379
+ if self.cached_arg_data is not None:
380
+ # If args have changed shape/value, we need to re-tune
381
+ assert len(self.cached_arg_data) == len(arg_data)
382
+ consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
383
+ else:
384
+ # Otherwise this is the first call: nothing is cached yet, so force a tuning pass
385
+ consistent = False
386
+
387
+ if not consistent:
388
+ self.cached_chunk_size = self._determine_favorable_chunk_size(
389
+ representative_fn,
390
+ args,
391
+ min_chunk_size,
392
+ )
393
+ self.cached_arg_data = arg_data
394
+
395
+ assert self.cached_chunk_size is not None
396
+
397
+ return self.cached_chunk_size
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/data_transforms.py ADDED
@@ -0,0 +1,93 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Dict
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from . import residue_constants as rc
22
+ from .tensor_utils import tensor_tree_map, tree_map
23
+
24
+
25
+ def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
26
+ """Construct denser atom positions (14 dimensions instead of 37)."""
27
+ restype_atom14_to_atom37_list = []
28
+ restype_atom37_to_atom14_list = []
29
+ restype_atom14_mask_list = []
30
+
31
+ for rt in rc.restypes:
32
+ atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
33
+ restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
34
+ atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
35
+ restype_atom37_to_atom14_list.append(
36
+ [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
37
+ )
38
+
39
+ restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
40
+
41
+ # Add dummy mapping for restype 'UNK'
42
+ restype_atom14_to_atom37_list.append([0] * 14)
43
+ restype_atom37_to_atom14_list.append([0] * 37)
44
+ restype_atom14_mask_list.append([0.0] * 14)
45
+
46
+ restype_atom14_to_atom37 = torch.tensor(
47
+ restype_atom14_to_atom37_list,
48
+ dtype=torch.int32,
49
+ device=protein["aatype"].device,
50
+ )
51
+ restype_atom37_to_atom14 = torch.tensor(
52
+ restype_atom37_to_atom14_list,
53
+ dtype=torch.int32,
54
+ device=protein["aatype"].device,
55
+ )
56
+ restype_atom14_mask = torch.tensor(
57
+ restype_atom14_mask_list,
58
+ dtype=torch.float32,
59
+ device=protein["aatype"].device,
60
+ )
61
+ protein_aatype = protein["aatype"].to(torch.long)
62
+
63
+ # create the mapping for (residx, atom14) --> atom37, i.e. an array
64
+ # with shape (num_res, 14) containing the atom37 indices for this protein
65
+ residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
66
+ residx_atom14_mask = restype_atom14_mask[protein_aatype]
67
+
68
+ protein["atom14_atom_exists"] = residx_atom14_mask
69
+ protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
70
+
71
+ # create the gather indices for mapping back
72
+ residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
73
+ protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
74
+
75
+ # create the corresponding mask
76
+ restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
77
+ for restype, restype_letter in enumerate(rc.restypes):
78
+ restype_name = rc.restype_1to3[restype_letter]
79
+ atom_names = rc.residue_atoms[restype_name]
80
+ for atom_name in atom_names:
81
+ atom_type = rc.atom_order[atom_name]
82
+ restype_atom37_mask[restype, atom_type] = 1
83
+
84
+ residx_atom37_mask = restype_atom37_mask[protein_aatype]
85
+ protein["atom37_atom_exists"] = residx_atom37_mask
86
+
87
+ return protein
88
+
89
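# A small usage sketch of the mask construction above (the aatype values are illustrative):
#
#     import torch
#     protein = {"aatype": torch.tensor([0, 7, 20])}    # e.g. ALA, GLY and UNK indices
#     protein = make_atom14_masks(protein)
#     protein["atom14_atom_exists"].shape       # (3, 14): which of the 14 slots are real atoms
#     protein["residx_atom14_to_atom37"].shape  # (3, 14): indices into the 37-atom layout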
+
90
+ def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
91
+ batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
92
+ out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
93
+ return out
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py ADDED
@@ -0,0 +1,255 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Dict, Tuple, overload
17
+
18
+ import torch
19
+ import torch.types
20
+ from torch import nn
21
+
22
+ from . import residue_constants as rc
23
+ from .rigid_utils import Rigid, Rotation
24
+ from .tensor_utils import batched_gather
25
+
26
+
27
+ @overload
28
+ def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor:
29
+ ...
30
+
31
+
32
+ @overload
33
+ def pseudo_beta_fn(
34
+ aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor
35
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
36
+ ...
37
+
38
+
39
+ def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
40
+ is_gly = aatype == rc.restype_order["G"]
41
+ ca_idx = rc.atom_order["CA"]
42
+ cb_idx = rc.atom_order["CB"]
43
+ pseudo_beta = torch.where(
44
+ is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
45
+ all_atom_positions[..., ca_idx, :],
46
+ all_atom_positions[..., cb_idx, :],
47
+ )
48
+
49
+ if all_atom_masks is not None:
50
+ pseudo_beta_mask = torch.where(
51
+ is_gly,
52
+ all_atom_masks[..., ca_idx],
53
+ all_atom_masks[..., cb_idx],
54
+ )
55
+ return pseudo_beta, pseudo_beta_mask
56
+ else:
57
+ return pseudo_beta
58
+
59
+
60
+ def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
61
+ atom37_data = batched_gather(
62
+ atom14,
63
+ batch["residx_atom37_to_atom14"],
64
+ dim=-2,
65
+ no_batch_dims=len(atom14.shape[:-2]),
66
+ )
67
+
68
+ atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
69
+
70
+ return atom37_data
71
+
72
+
73
+ def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor:
74
+ template_aatype = template_feats["template_aatype"]
75
+ torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
76
+ alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"]
77
+ torsion_angles_mask = template_feats["template_torsion_angles_mask"]
78
+ template_angle_feat = torch.cat(
79
+ [
80
+ nn.functional.one_hot(template_aatype, 22),
81
+ torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14),
82
+ alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14),
83
+ torsion_angles_mask,
84
+ ],
85
+ dim=-1,
86
+ )
87
+
88
+ return template_angle_feat
89
+
90
+
91
+ def build_template_pair_feat(
92
+ batch: Dict[str, torch.Tensor],
93
+ min_bin: torch.types.Number,
94
+ max_bin: torch.types.Number,
95
+ no_bins: int,
96
+ use_unit_vector: bool = False,
97
+ eps: float = 1e-20,
98
+ inf: float = 1e8,
99
+ ) -> torch.Tensor:
100
+ template_mask = batch["template_pseudo_beta_mask"]
101
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
102
+
103
+ # Compute distogram (this seems to differ slightly from Alg. 5)
104
+ tpb = batch["template_pseudo_beta"]
105
+ dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True)
106
+ lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
107
+ upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
108
+ dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
109
+
110
+ to_concat = [dgram, template_mask_2d[..., None]]
111
+
112
+ aatype_one_hot: torch.LongTensor = nn.functional.one_hot(
113
+ batch["template_aatype"],
114
+ rc.restype_num + 2,
115
+ )
116
+
117
+ n_res = batch["template_aatype"].shape[-1]
118
+ to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1))
119
+ to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1))
120
+
121
+ n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
122
+ rigids = Rigid.make_transform_from_reference(
123
+ n_xyz=batch["template_all_atom_positions"][..., n, :],
124
+ ca_xyz=batch["template_all_atom_positions"][..., ca, :],
125
+ c_xyz=batch["template_all_atom_positions"][..., c, :],
126
+ eps=eps,
127
+ )
128
+ points = rigids.get_trans()[..., None, :, :]
129
+ rigid_vec = rigids[..., None].invert_apply(points)
130
+
131
+ inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1))
132
+
133
+ t_aa_masks = batch["template_all_atom_mask"]
134
+ template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
135
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
136
+
137
+ inv_distance_scalar = inv_distance_scalar * template_mask_2d
138
+ unit_vector = rigid_vec * inv_distance_scalar[..., None]
139
+
140
+ if not use_unit_vector:
141
+ unit_vector = unit_vector * 0.0
142
+
143
+ to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
144
+ to_concat.append(template_mask_2d[..., None])
145
+
146
+ act = torch.cat(to_concat, dim=-1)
147
+ act = act * template_mask_2d[..., None]
148
+
149
+ return act
150
+
151
+
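The distogram step above bins squared pairwise distances by comparing them against squared bin edges, which yields a one-hot bin membership per residue pair. The self-contained sketch below reproduces just that trick on a toy 2x2 distance matrix; the numbers are chosen purely for illustration.

    import torch

    d2 = torch.tensor([[0.0, 5.0], [5.0, 0.0]]).unsqueeze(-1)        # squared distances, [2, 2, 1]
    lower = torch.linspace(1.0, 3.0, 3) ** 2                         # squared lower bin edges: 1, 4, 9
    upper = torch.cat([lower[1:], lower.new_tensor([1e8])], dim=-1)  # squared upper bin edges
    dgram = ((d2 > lower) * (d2 < upper)).float()                    # [2, 2, 3] one-hot distogram
    print(dgram[0, 1])  # tensor([0., 1., 0.]): distance sqrt(5) ~ 2.24 falls in the middle bin [2, 3)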
152
+ def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor:
153
+ msa_1hot: torch.LongTensor = nn.functional.one_hot(batch["extra_msa"], 23)
154
+ msa_feat = [
155
+ msa_1hot,
156
+ batch["extra_has_deletion"].unsqueeze(-1),
157
+ batch["extra_deletion_value"].unsqueeze(-1),
158
+ ]
159
+ return torch.cat(msa_feat, dim=-1)
160
+
161
+
162
+ def torsion_angles_to_frames(
163
+ r: Rigid,
164
+ alpha: torch.Tensor,
165
+ aatype: torch.Tensor,
166
+ rrgdf: torch.Tensor,
167
+ ) -> Rigid:
168
+ # [*, N, 8, 4, 4]
169
+ default_4x4 = rrgdf[aatype, ...]
170
+
171
+ # [*, N, 8] transformations, i.e.
172
+ # One [*, N, 8, 3, 3] rotation matrix and
173
+ # One [*, N, 8, 3] translation vector
174
+ default_r = r.from_tensor_4x4(default_4x4)
175
+
176
+ bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
177
+ bb_rot[..., 1] = 1
178
+
179
+ # [*, N, 8, 2]
180
+ alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2)
181
+
182
+ # [*, N, 8, 3, 3]
183
+ # Produces rotation matrices of the form:
184
+ # [
185
+ # [1, 0 , 0 ],
186
+ # [0, a_2,-a_1],
187
+ # [0, a_1, a_2]
188
+ # ]
189
+ # This follows the original code rather than the supplement, which uses
190
+ # different indices.
191
+
192
+ all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
193
+ all_rots[..., 0, 0] = 1
194
+ all_rots[..., 1, 1] = alpha[..., 1]
195
+ all_rots[..., 1, 2] = -alpha[..., 0]
196
+ all_rots[..., 2, 1:] = alpha
197
+
198
+ all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None))
199
+
200
+ chi2_frame_to_frame = all_frames[..., 5]
201
+ chi3_frame_to_frame = all_frames[..., 6]
202
+ chi4_frame_to_frame = all_frames[..., 7]
203
+
204
+ chi1_frame_to_bb = all_frames[..., 4]
205
+ chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)
206
+ chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)
207
+ chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)
208
+
209
+ all_frames_to_bb = Rigid.cat(
210
+ [
211
+ all_frames[..., :5],
212
+ chi2_frame_to_bb.unsqueeze(-1),
213
+ chi3_frame_to_bb.unsqueeze(-1),
214
+ chi4_frame_to_bb.unsqueeze(-1),
215
+ ],
216
+ dim=-1,
217
+ )
218
+
219
+ all_frames_to_global = r[..., None].compose(all_frames_to_bb)
220
+
221
+ return all_frames_to_global
222
+
223
+
224
+ def frames_and_literature_positions_to_atom14_pos(
225
+ r: Rigid,
226
+ aatype: torch.Tensor,
227
+ default_frames: torch.Tensor,
228
+ group_idx: torch.Tensor,
229
+ atom_mask: torch.Tensor,
230
+ lit_positions: torch.Tensor,
231
+ ) -> torch.Tensor:
232
+ # [*, N, 14]
233
+ group_mask = group_idx[aatype, ...]
234
+
235
+ # [*, N, 14, 8]
236
+ group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
237
+ group_mask,
238
+ num_classes=default_frames.shape[-3],
239
+ )
240
+
241
+ # [*, N, 14, 8]
242
+ t_atoms_to_global = r[..., None, :] * group_mask_one_hot
243
+
244
+ # [*, N, 14]
245
+ t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1))
246
+
247
+ # [*, N, 14, 1]
248
+ atom_mask = atom_mask[aatype, ...].unsqueeze(-1)
249
+
250
+ # [*, N, 14, 3]
251
+ lit_positions = lit_positions[aatype, ...]
252
+ pred_positions = t_atoms_to_global.apply(lit_positions)
253
+ pred_positions = pred_positions * atom_mask
254
+
255
+ return pred_positions
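For orientation, a minimal sketch of pseudo_beta_fn on random inputs; the import paths follow the file locations shown in this diff, and only the output shapes are being demonstrated.

    import torch
    from transformers.models.esm.openfold_utils import residue_constants as rc
    from transformers.models.esm.openfold_utils.feats import pseudo_beta_fn

    num_res = 4
    aatype = torch.randint(0, 20, (num_res,))
    all_atom_positions = torch.randn(num_res, rc.atom_type_num, 3)  # [N, 37, 3]
    all_atom_mask = torch.ones(num_res, rc.atom_type_num)           # [N, 37]

    # CB coordinates (CA for glycine) and the matching per-residue mask.
    pseudo_beta, pseudo_beta_mask = pseudo_beta_fn(aatype, all_atom_positions, all_atom_mask)
    print(pseudo_beta.shape, pseudo_beta_mask.shape)  # torch.Size([4, 3]) torch.Size([4])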
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/loss.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Dict, Optional, Tuple
17
+
18
+ import torch
19
+
20
+
21
+ def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor:
22
+ step = boundaries[1] - boundaries[0]
23
+ bin_centers = boundaries + step / 2
24
+ bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0)
25
+ return bin_centers
26
+
27
+
28
+ def _calculate_expected_aligned_error(
29
+ alignment_confidence_breaks: torch.Tensor,
30
+ aligned_distance_error_probs: torch.Tensor,
31
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
32
+ bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
33
+ return (
34
+ torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
35
+ bin_centers[-1],
36
+ )
37
+
38
+
39
+ def compute_predicted_aligned_error(
40
+ logits: torch.Tensor,
41
+ max_bin: int = 31,
42
+ no_bins: int = 64,
43
+ **kwargs,
44
+ ) -> Dict[str, torch.Tensor]:
45
+ """Computes aligned confidence metrics from logits.
46
+
47
+ Args:
48
+ logits: [*, num_res, num_res, num_bins] the logits output from
49
+ PredictedAlignedErrorHead.
50
+ max_bin: Maximum bin value
51
+ no_bins: Number of bins
52
+ Returns:
53
+ aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
54
+ aligned error probabilities over bins for each residue pair.
55
+ predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
56
+ error for each pair of residues.
57
+ max_predicted_aligned_error: [*] the maximum predicted error possible.
58
+ """
59
+ boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
60
+
61
+ aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
62
+ predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(
63
+ alignment_confidence_breaks=boundaries,
64
+ aligned_distance_error_probs=aligned_confidence_probs,
65
+ )
66
+
67
+ return {
68
+ "aligned_confidence_probs": aligned_confidence_probs,
69
+ "predicted_aligned_error": predicted_aligned_error,
70
+ "max_predicted_aligned_error": max_predicted_aligned_error,
71
+ }
72
+
73
+
74
+ def compute_tm(
75
+ logits: torch.Tensor,
76
+ residue_weights: Optional[torch.Tensor] = None,
77
+ max_bin: int = 31,
78
+ no_bins: int = 64,
79
+ eps: float = 1e-8,
80
+ **kwargs,
81
+ ) -> torch.Tensor:
82
+ if residue_weights is None:
83
+ residue_weights = logits.new_ones(logits.shape[-2])
84
+
85
+ boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
86
+
87
+ bin_centers = _calculate_bin_centers(boundaries)
88
+ torch.sum(residue_weights)  # note: the result is discarded; this statement has no effect
89
+ n = logits.shape[-2]
90
+ clipped_n = max(n, 19)
91
+
92
+ d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8
93
+
94
+ probs = torch.nn.functional.softmax(logits, dim=-1)
95
+
96
+ tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
97
+ predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)
98
+
99
+ normed_residue_mask = residue_weights / (eps + residue_weights.sum())
100
+ per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)
101
+
102
+ weighted = per_alignment * residue_weights
103
+
104
+ argmax = (weighted == torch.max(weighted)).nonzero()[0]
105
+ return per_alignment[tuple(argmax)]
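A small sketch of the two confidence utilities above on dummy logits; the bin count matches the defaults and the values themselves are meaningless.

    import torch
    from transformers.models.esm.openfold_utils.loss import compute_predicted_aligned_error, compute_tm

    num_res, no_bins = 8, 64
    logits = torch.randn(num_res, num_res, no_bins)  # stand-in for the PAE head output

    pae = compute_predicted_aligned_error(logits, max_bin=31, no_bins=no_bins)
    print(pae["predicted_aligned_error"].shape)       # torch.Size([8, 8])
    print(float(pae["max_predicted_aligned_error"]))  # upper edge of the last error bin

    ptm = compute_tm(logits, max_bin=31, no_bins=no_bins)
    print(float(ptm))  # scalar predicted TM-score in (0, 1]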
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/protein.py ADDED
@@ -0,0 +1,329 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Protein data type."""
17
+ import dataclasses
18
+ import re
19
+ import string
20
+ from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
21
+
22
+ import numpy as np
23
+
24
+ from . import residue_constants
25
+
26
+
27
+ FeatureDict = Mapping[str, np.ndarray]
28
+ ModelOutput = Mapping[str, Any] # Is a nested dict.
29
+ PICO_TO_ANGSTROM = 0.01
30
+
31
+
32
+ @dataclasses.dataclass(frozen=True)
33
+ class Protein:
34
+ """Protein structure representation."""
35
+
36
+ # Cartesian coordinates of atoms in angstroms. The atom types correspond to
37
+ # residue_constants.atom_types, i.e. the first three are N, CA and C.
38
+ atom_positions: np.ndarray # [num_res, num_atom_type, 3]
39
+
40
+ # Amino-acid type for each residue represented as an integer between 0 and
41
+ # 20, where 20 is 'X'.
42
+ aatype: np.ndarray # [num_res]
43
+
44
+ # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
45
+ # is present and 0.0 if not. This should be used for loss masking.
46
+ atom_mask: np.ndarray # [num_res, num_atom_type]
47
+
48
+ # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
49
+ residue_index: np.ndarray # [num_res]
50
+
51
+ # B-factors, or temperature factors, of each residue (in sq. angstroms units),
52
+ # representing the displacement of the residue from its ground truth mean
53
+ # value.
54
+ b_factors: np.ndarray # [num_res, num_atom_type]
55
+
56
+ # Chain indices for multi-chain predictions
57
+ chain_index: Optional[np.ndarray] = None
58
+
59
+ # Optional remark about the protein. Included as a comment in output PDB
60
+ # files
61
+ remark: Optional[str] = None
62
+
63
+ # Templates used to generate this protein (prediction-only)
64
+ parents: Optional[Sequence[str]] = None
65
+
66
+ # Chain corresponding to each parent
67
+ parents_chain_index: Optional[Sequence[int]] = None
68
+
69
+
70
+ def from_proteinnet_string(proteinnet_str: str) -> Protein:
71
+ tag_re = r"(\[[A-Z]+\]\n)"
72
+ tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
73
+ groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
74
+
75
+ atoms: List[str] = ["N", "CA", "C"]
76
+ aatype = None
77
+ atom_positions = None
78
+ atom_mask = None
79
+ for g in groups:
80
+ if "[PRIMARY]" == g[0]:
81
+ seq = list(g[1][0].strip())
82
+ for i in range(len(seq)):
83
+ if seq[i] not in residue_constants.restypes:
84
+ seq[i] = "X"  # seq is a list of characters, so item assignment is valid here
85
+ aatype = np.array(
86
+ [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
87
+ )
88
+ elif "[TERTIARY]" == g[0]:
89
+ tertiary: List[List[float]] = []
90
+ for axis in range(3):
91
+ tertiary.append(list(map(float, g[1][axis].split())))
92
+ tertiary_np = np.array(tertiary)
93
+ atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
94
+ for i, atom in enumerate(atoms):
95
+ atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
96
+ atom_positions *= PICO_TO_ANGSTROM
97
+ elif "[MASK]" == g[0]:
98
+ mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
99
+ atom_mask = np.zeros(
100
+ (
101
+ len(mask),
102
+ residue_constants.atom_type_num,
103
+ )
104
+ ).astype(np.float32)
105
+ for i, atom in enumerate(atoms):
106
+ atom_mask[:, residue_constants.atom_order[atom]] = 1
107
+ atom_mask *= mask[..., None]
108
+
109
+ assert aatype is not None
110
+
111
+ return Protein(
112
+ atom_positions=atom_positions,
113
+ atom_mask=atom_mask,
114
+ aatype=aatype,
115
+ residue_index=np.arange(len(aatype)),
116
+ b_factors=None,
117
+ )
118
+
119
+
120
+ def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
121
+ pdb_headers: List[str] = []
122
+
123
+ remark = prot.remark
124
+ if remark is not None:
125
+ pdb_headers.append(f"REMARK {remark}")
126
+
127
+ parents = prot.parents
128
+ parents_chain_index = prot.parents_chain_index
129
+ if parents is not None and parents_chain_index is not None:
130
+ parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
131
+
132
+ if parents is None or len(parents) == 0:
133
+ parents = ["N/A"]
134
+
135
+ pdb_headers.append(f"PARENT {' '.join(parents)}")
136
+
137
+ return pdb_headers
138
+
139
+
140
+ def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
141
+ """Add pdb headers to an existing PDB string. Useful during multi-chain
142
+ recycling
143
+ """
144
+ out_pdb_lines: List[str] = []
145
+ lines = pdb_str.split("\n")
146
+
147
+ remark = prot.remark
148
+ if remark is not None:
149
+ out_pdb_lines.append(f"REMARK {remark}")
150
+
151
+ parents_per_chain: List[List[str]]
152
+ if prot.parents is not None and len(prot.parents) > 0:
153
+ parents_per_chain = []
154
+ if prot.parents_chain_index is not None:
155
+ parent_dict: Dict[str, List[str]] = {}
156
+ for p, i in zip(prot.parents, prot.parents_chain_index):
157
+ parent_dict.setdefault(str(i), [])
158
+ parent_dict[str(i)].append(p)
159
+
160
+ max_idx = max([int(chain_idx) for chain_idx in parent_dict])
161
+ for i in range(max_idx + 1):
162
+ chain_parents = parent_dict.get(str(i), ["N/A"])
163
+ parents_per_chain.append(chain_parents)
164
+ else:
165
+ parents_per_chain.append(list(prot.parents))
166
+ else:
167
+ parents_per_chain = [["N/A"]]
168
+
169
+ def make_parent_line(p: Sequence[str]) -> str:
170
+ return f"PARENT {' '.join(p)}"
171
+
172
+ out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
173
+
174
+ chain_counter = 0
175
+ for i, l in enumerate(lines):
176
+ if "PARENT" not in l and "REMARK" not in l:
177
+ out_pdb_lines.append(l)
178
+ if "TER" in l and "END" not in lines[i + 1]:
179
+ chain_counter += 1
180
+ if chain_counter < len(parents_per_chain):
181
+ chain_parents = parents_per_chain[chain_counter]
182
+ else:
183
+ chain_parents = ["N/A"]
184
+
185
+ out_pdb_lines.append(make_parent_line(chain_parents))
186
+
187
+ return "\n".join(out_pdb_lines)
188
+
189
+
190
+ def to_pdb(prot: Protein) -> str:
191
+ """Converts a `Protein` instance to a PDB string.
192
+
193
+ Args:
194
+ prot: The protein to convert to PDB.
195
+
196
+ Returns:
197
+ PDB string.
198
+ """
199
+ restypes = residue_constants.restypes + ["X"]
200
+
201
+ def res_1to3(r: int) -> str:
202
+ return residue_constants.restype_1to3.get(restypes[r], "UNK")
203
+
204
+ atom_types = residue_constants.atom_types
205
+
206
+ pdb_lines: List[str] = []
207
+
208
+ atom_mask = prot.atom_mask
209
+ aatype = prot.aatype
210
+ atom_positions = prot.atom_positions
211
+ residue_index = prot.residue_index.astype(np.int32)
212
+ b_factors = prot.b_factors
213
+ chain_index = prot.chain_index
214
+
215
+ if np.any(aatype > residue_constants.restype_num):
216
+ raise ValueError("Invalid aatypes.")
217
+
218
+ headers = get_pdb_headers(prot)
219
+ if len(headers) > 0:
220
+ pdb_lines.extend(headers)
221
+
222
+ n = aatype.shape[0]
223
+ atom_index = 1
224
+ prev_chain_index = 0
225
+ chain_tags = string.ascii_uppercase
226
+ chain_tag = None
227
+ # Add all atom sites.
228
+ for i in range(n):
229
+ res_name_3 = res_1to3(aatype[i])
230
+ for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
231
+ if mask < 0.5:
232
+ continue
233
+
234
+ record_type = "ATOM"
235
+ name = atom_name if len(atom_name) == 4 else f" {atom_name}"
236
+ alt_loc = ""
237
+ insertion_code = ""
238
+ occupancy = 1.00
239
+ element = atom_name[0]  # proteins here contain only C, N, O and S atoms, so the first character gives the element
240
+ charge = ""
241
+
242
+ chain_tag = "A"
243
+ if chain_index is not None:
244
+ chain_tag = chain_tags[chain_index[i]]
245
+
246
+ # PDB is a columnar format, every space matters here!
247
+ atom_line = (
248
+ f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
249
+ f"{res_name_3:>3} {chain_tag:>1}"
250
+ f"{residue_index[i]:>4}{insertion_code:>1} "
251
+ f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
252
+ f"{occupancy:>6.2f}{b_factor:>6.2f} "
253
+ f"{element:>2}{charge:>2}"
254
+ )
255
+ pdb_lines.append(atom_line)
256
+ atom_index += 1
257
+
258
+ should_terminate = i == n - 1
259
+ if chain_index is not None:
260
+ if i != n - 1 and chain_index[i + 1] != prev_chain_index:
261
+ should_terminate = True
262
+ prev_chain_index = chain_index[i + 1]
263
+
264
+ if should_terminate:
265
+ # Close the chain.
266
+ chain_end = "TER"
267
+ chain_termination_line = (
268
+ f"{chain_end:<6}{atom_index:>5} {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
269
+ )
270
+ pdb_lines.append(chain_termination_line)
271
+ atom_index += 1
272
+
273
+ if i != n - 1:
274
+ # "prev" is a misnomer here. This happens at the beginning of
275
+ # each new chain.
276
+ pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
277
+
278
+ pdb_lines.append("END")
279
+ pdb_lines.append("")
280
+ return "\n".join(pdb_lines)
281
+
282
+
283
+ def ideal_atom_mask(prot: Protein) -> np.ndarray:
284
+ """Computes an ideal atom mask.
285
+
286
+ `Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function
287
+ computes a mask according to heavy atoms that should be present in the given sequence of amino acids.
288
+
289
+ Args:
290
+ prot: `Protein` whose fields are `numpy.ndarray` objects.
291
+
292
+ Returns:
293
+ An ideal atom mask.
294
+ """
295
+ return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
296
+
297
+
298
+ def from_prediction(
299
+ features: FeatureDict,
300
+ result: ModelOutput,
301
+ b_factors: Optional[np.ndarray] = None,
302
+ chain_index: Optional[np.ndarray] = None,
303
+ remark: Optional[str] = None,
304
+ parents: Optional[Sequence[str]] = None,
305
+ parents_chain_index: Optional[Sequence[int]] = None,
306
+ ) -> Protein:
307
+ """Assembles a protein from a prediction.
308
+
309
+ Args:
310
+ features: Dictionary holding model inputs.
311
+ result: Dictionary holding model outputs.
312
+ b_factors: (Optional) B-factors to use for the protein.
313
+ chain_index: (Optional) Chain indices for multi-chain predictions
314
+ remark: (Optional) Remark about the prediction
315
+ parents: (Optional) List of template names
316
+ Returns:
317
+ A protein instance.
318
+ """
319
+ return Protein(
320
+ aatype=features["aatype"],
321
+ atom_positions=result["final_atom_positions"],
322
+ atom_mask=result["final_atom_mask"],
323
+ residue_index=features["residue_index"] + 1,
324
+ b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
325
+ chain_index=chain_index,
326
+ remark=remark,
327
+ parents=parents,
328
+ parents_chain_index=parents_chain_index,
329
+ )
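A minimal end-to-end sketch of the Protein helpers above: assemble a Protein from dummy feature/result dicts and render it with to_pdb. All arrays here are placeholders; the import paths follow the file locations shown in this diff.

    import numpy as np
    from transformers.models.esm.openfold_utils import residue_constants
    from transformers.models.esm.openfold_utils.protein import from_prediction, to_pdb

    aatype = np.array([0, 5, 7])  # ALA, GLN, GLY
    features = {"aatype": aatype, "residue_index": np.arange(len(aatype))}
    result = {
        "final_atom_positions": np.zeros((len(aatype), residue_constants.atom_type_num, 3)),
        "final_atom_mask": residue_constants.STANDARD_ATOM_MASK[aatype].astype(np.float32),
    }

    prot = from_prediction(features, result, remark="toy example")
    pdb_str = to_pdb(prot)
    print(pdb_str.splitlines()[0])  # REMARK toy example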
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/residue_constants.py ADDED
@@ -0,0 +1,983 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Constants used in AlphaFold."""
17
+
18
+ import collections
19
+ import copy
20
+ import functools
21
+ from importlib import resources
22
+ from typing import Dict, List, Mapping, Sequence, Tuple
23
+
24
+ import numpy as np
25
+
26
+
27
+ # Internal import (35fd).
28
+
29
+
30
+ # Distance from one CA to next CA [trans configuration: omega = 180].
31
+ ca_ca = 3.80209737096
32
+
33
+ # Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
34
+ # this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
35
+ # chi angles so their chi angle lists are empty.
36
+ chi_angles_atoms: Dict[str, List[List[str]]] = {
37
+ "ALA": [],
38
+ # Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
39
+ "ARG": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "NE"], ["CG", "CD", "NE", "CZ"]],
40
+ "ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
41
+ "ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
42
+ "CYS": [["N", "CA", "CB", "SG"]],
43
+ "GLN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
44
+ "GLU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
45
+ "GLY": [],
46
+ "HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]],
47
+ "ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]],
48
+ "LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
49
+ "LYS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "CE"], ["CG", "CD", "CE", "NZ"]],
50
+ "MET": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "SD"], ["CB", "CG", "SD", "CE"]],
51
+ "PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
52
+ "PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]],
53
+ "SER": [["N", "CA", "CB", "OG"]],
54
+ "THR": [["N", "CA", "CB", "OG1"]],
55
+ "TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
56
+ "TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
57
+ "VAL": [["N", "CA", "CB", "CG1"]],
58
+ }
59
+
60
+ # If chi angles given in fixed-length array, this matrix determines how to mask
61
+ # them for each AA type. The order is as per restype_order (see below).
62
+ chi_angles_mask: List[List[float]] = [
63
+ [0.0, 0.0, 0.0, 0.0], # ALA
64
+ [1.0, 1.0, 1.0, 1.0], # ARG
65
+ [1.0, 1.0, 0.0, 0.0], # ASN
66
+ [1.0, 1.0, 0.0, 0.0], # ASP
67
+ [1.0, 0.0, 0.0, 0.0], # CYS
68
+ [1.0, 1.0, 1.0, 0.0], # GLN
69
+ [1.0, 1.0, 1.0, 0.0], # GLU
70
+ [0.0, 0.0, 0.0, 0.0], # GLY
71
+ [1.0, 1.0, 0.0, 0.0], # HIS
72
+ [1.0, 1.0, 0.0, 0.0], # ILE
73
+ [1.0, 1.0, 0.0, 0.0], # LEU
74
+ [1.0, 1.0, 1.0, 1.0], # LYS
75
+ [1.0, 1.0, 1.0, 0.0], # MET
76
+ [1.0, 1.0, 0.0, 0.0], # PHE
77
+ [1.0, 1.0, 0.0, 0.0], # PRO
78
+ [1.0, 0.0, 0.0, 0.0], # SER
79
+ [1.0, 0.0, 0.0, 0.0], # THR
80
+ [1.0, 1.0, 0.0, 0.0], # TRP
81
+ [1.0, 1.0, 0.0, 0.0], # TYR
82
+ [1.0, 0.0, 0.0, 0.0], # VAL
83
+ ]
84
+
85
+ # The following chi angles are pi periodic: they can be rotated by a multiple
86
+ # of pi without affecting the structure.
87
+ chi_pi_periodic: List[List[float]] = [
88
+ [0.0, 0.0, 0.0, 0.0], # ALA
89
+ [0.0, 0.0, 0.0, 0.0], # ARG
90
+ [0.0, 0.0, 0.0, 0.0], # ASN
91
+ [0.0, 1.0, 0.0, 0.0], # ASP
92
+ [0.0, 0.0, 0.0, 0.0], # CYS
93
+ [0.0, 0.0, 0.0, 0.0], # GLN
94
+ [0.0, 0.0, 1.0, 0.0], # GLU
95
+ [0.0, 0.0, 0.0, 0.0], # GLY
96
+ [0.0, 0.0, 0.0, 0.0], # HIS
97
+ [0.0, 0.0, 0.0, 0.0], # ILE
98
+ [0.0, 0.0, 0.0, 0.0], # LEU
99
+ [0.0, 0.0, 0.0, 0.0], # LYS
100
+ [0.0, 0.0, 0.0, 0.0], # MET
101
+ [0.0, 1.0, 0.0, 0.0], # PHE
102
+ [0.0, 0.0, 0.0, 0.0], # PRO
103
+ [0.0, 0.0, 0.0, 0.0], # SER
104
+ [0.0, 0.0, 0.0, 0.0], # THR
105
+ [0.0, 0.0, 0.0, 0.0], # TRP
106
+ [0.0, 1.0, 0.0, 0.0], # TYR
107
+ [0.0, 0.0, 0.0, 0.0], # VAL
108
+ [0.0, 0.0, 0.0, 0.0], # UNK
109
+ ]
110
+
111
+ # Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
112
+ # psi and chi angles:
113
+ # 0: 'backbone group',
114
+ # 1: 'pre-omega-group', (empty)
115
+ # 2: 'phi-group', (currently empty, because it defines only hydrogens)
116
+ # 3: 'psi-group',
117
+ # 4,5,6,7: 'chi1,2,3,4-group'
118
+ # The atom positions are relative to the axis-end-atom of the corresponding
119
+ # rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
120
+ # is defined such that the dihedral-angle-defining atom (the last entry in
121
+ # chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
122
+ # format: [atomname, group_idx, rel_position]
123
+ rigid_group_atom_positions: Dict[str, List[Tuple[str, int, Tuple[float, float, float]]]] = {
124
+ "ALA": [
125
+ ("N", 0, (-0.525, 1.363, 0.000)),
126
+ ("CA", 0, (0.000, 0.000, 0.000)),
127
+ ("C", 0, (1.526, -0.000, -0.000)),
128
+ ("CB", 0, (-0.529, -0.774, -1.205)),
129
+ ("O", 3, (0.627, 1.062, 0.000)),
130
+ ],
131
+ "ARG": [
132
+ ("N", 0, (-0.524, 1.362, -0.000)),
133
+ ("CA", 0, (0.000, 0.000, 0.000)),
134
+ ("C", 0, (1.525, -0.000, -0.000)),
135
+ ("CB", 0, (-0.524, -0.778, -1.209)),
136
+ ("O", 3, (0.626, 1.062, 0.000)),
137
+ ("CG", 4, (0.616, 1.390, -0.000)),
138
+ ("CD", 5, (0.564, 1.414, 0.000)),
139
+ ("NE", 6, (0.539, 1.357, -0.000)),
140
+ ("NH1", 7, (0.206, 2.301, 0.000)),
141
+ ("NH2", 7, (2.078, 0.978, -0.000)),
142
+ ("CZ", 7, (0.758, 1.093, -0.000)),
143
+ ],
144
+ "ASN": [
145
+ ("N", 0, (-0.536, 1.357, 0.000)),
146
+ ("CA", 0, (0.000, 0.000, 0.000)),
147
+ ("C", 0, (1.526, -0.000, -0.000)),
148
+ ("CB", 0, (-0.531, -0.787, -1.200)),
149
+ ("O", 3, (0.625, 1.062, 0.000)),
150
+ ("CG", 4, (0.584, 1.399, 0.000)),
151
+ ("ND2", 5, (0.593, -1.188, 0.001)),
152
+ ("OD1", 5, (0.633, 1.059, 0.000)),
153
+ ],
154
+ "ASP": [
155
+ ("N", 0, (-0.525, 1.362, -0.000)),
156
+ ("CA", 0, (0.000, 0.000, 0.000)),
157
+ ("C", 0, (1.527, 0.000, -0.000)),
158
+ ("CB", 0, (-0.526, -0.778, -1.208)),
159
+ ("O", 3, (0.626, 1.062, -0.000)),
160
+ ("CG", 4, (0.593, 1.398, -0.000)),
161
+ ("OD1", 5, (0.610, 1.091, 0.000)),
162
+ ("OD2", 5, (0.592, -1.101, -0.003)),
163
+ ],
164
+ "CYS": [
165
+ ("N", 0, (-0.522, 1.362, -0.000)),
166
+ ("CA", 0, (0.000, 0.000, 0.000)),
167
+ ("C", 0, (1.524, 0.000, 0.000)),
168
+ ("CB", 0, (-0.519, -0.773, -1.212)),
169
+ ("O", 3, (0.625, 1.062, -0.000)),
170
+ ("SG", 4, (0.728, 1.653, 0.000)),
171
+ ],
172
+ "GLN": [
173
+ ("N", 0, (-0.526, 1.361, -0.000)),
174
+ ("CA", 0, (0.000, 0.000, 0.000)),
175
+ ("C", 0, (1.526, 0.000, 0.000)),
176
+ ("CB", 0, (-0.525, -0.779, -1.207)),
177
+ ("O", 3, (0.626, 1.062, -0.000)),
178
+ ("CG", 4, (0.615, 1.393, 0.000)),
179
+ ("CD", 5, (0.587, 1.399, -0.000)),
180
+ ("NE2", 6, (0.593, -1.189, -0.001)),
181
+ ("OE1", 6, (0.634, 1.060, 0.000)),
182
+ ],
183
+ "GLU": [
184
+ ("N", 0, (-0.528, 1.361, 0.000)),
185
+ ("CA", 0, (0.000, 0.000, 0.000)),
186
+ ("C", 0, (1.526, -0.000, -0.000)),
187
+ ("CB", 0, (-0.526, -0.781, -1.207)),
188
+ ("O", 3, (0.626, 1.062, 0.000)),
189
+ ("CG", 4, (0.615, 1.392, 0.000)),
190
+ ("CD", 5, (0.600, 1.397, 0.000)),
191
+ ("OE1", 6, (0.607, 1.095, -0.000)),
192
+ ("OE2", 6, (0.589, -1.104, -0.001)),
193
+ ],
194
+ "GLY": [
195
+ ("N", 0, (-0.572, 1.337, 0.000)),
196
+ ("CA", 0, (0.000, 0.000, 0.000)),
197
+ ("C", 0, (1.517, -0.000, -0.000)),
198
+ ("O", 3, (0.626, 1.062, -0.000)),
199
+ ],
200
+ "HIS": [
201
+ ("N", 0, (-0.527, 1.360, 0.000)),
202
+ ("CA", 0, (0.000, 0.000, 0.000)),
203
+ ("C", 0, (1.525, 0.000, 0.000)),
204
+ ("CB", 0, (-0.525, -0.778, -1.208)),
205
+ ("O", 3, (0.625, 1.063, 0.000)),
206
+ ("CG", 4, (0.600, 1.370, -0.000)),
207
+ ("CD2", 5, (0.889, -1.021, 0.003)),
208
+ ("ND1", 5, (0.744, 1.160, -0.000)),
209
+ ("CE1", 5, (2.030, 0.851, 0.002)),
210
+ ("NE2", 5, (2.145, -0.466, 0.004)),
211
+ ],
212
+ "ILE": [
213
+ ("N", 0, (-0.493, 1.373, -0.000)),
214
+ ("CA", 0, (0.000, 0.000, 0.000)),
215
+ ("C", 0, (1.527, -0.000, -0.000)),
216
+ ("CB", 0, (-0.536, -0.793, -1.213)),
217
+ ("O", 3, (0.627, 1.062, -0.000)),
218
+ ("CG1", 4, (0.534, 1.437, -0.000)),
219
+ ("CG2", 4, (0.540, -0.785, -1.199)),
220
+ ("CD1", 5, (0.619, 1.391, 0.000)),
221
+ ],
222
+ "LEU": [
223
+ ("N", 0, (-0.520, 1.363, 0.000)),
224
+ ("CA", 0, (0.000, 0.000, 0.000)),
225
+ ("C", 0, (1.525, -0.000, -0.000)),
226
+ ("CB", 0, (-0.522, -0.773, -1.214)),
227
+ ("O", 3, (0.625, 1.063, -0.000)),
228
+ ("CG", 4, (0.678, 1.371, 0.000)),
229
+ ("CD1", 5, (0.530, 1.430, -0.000)),
230
+ ("CD2", 5, (0.535, -0.774, 1.200)),
231
+ ],
232
+ "LYS": [
233
+ ("N", 0, (-0.526, 1.362, -0.000)),
234
+ ("CA", 0, (0.000, 0.000, 0.000)),
235
+ ("C", 0, (1.526, 0.000, 0.000)),
236
+ ("CB", 0, (-0.524, -0.778, -1.208)),
237
+ ("O", 3, (0.626, 1.062, -0.000)),
238
+ ("CG", 4, (0.619, 1.390, 0.000)),
239
+ ("CD", 5, (0.559, 1.417, 0.000)),
240
+ ("CE", 6, (0.560, 1.416, 0.000)),
241
+ ("NZ", 7, (0.554, 1.387, 0.000)),
242
+ ],
243
+ "MET": [
244
+ ("N", 0, (-0.521, 1.364, -0.000)),
245
+ ("CA", 0, (0.000, 0.000, 0.000)),
246
+ ("C", 0, (1.525, 0.000, 0.000)),
247
+ ("CB", 0, (-0.523, -0.776, -1.210)),
248
+ ("O", 3, (0.625, 1.062, -0.000)),
249
+ ("CG", 4, (0.613, 1.391, -0.000)),
250
+ ("SD", 5, (0.703, 1.695, 0.000)),
251
+ ("CE", 6, (0.320, 1.786, -0.000)),
252
+ ],
253
+ "PHE": [
254
+ ("N", 0, (-0.518, 1.363, 0.000)),
255
+ ("CA", 0, (0.000, 0.000, 0.000)),
256
+ ("C", 0, (1.524, 0.000, -0.000)),
257
+ ("CB", 0, (-0.525, -0.776, -1.212)),
258
+ ("O", 3, (0.626, 1.062, -0.000)),
259
+ ("CG", 4, (0.607, 1.377, 0.000)),
260
+ ("CD1", 5, (0.709, 1.195, -0.000)),
261
+ ("CD2", 5, (0.706, -1.196, 0.000)),
262
+ ("CE1", 5, (2.102, 1.198, -0.000)),
263
+ ("CE2", 5, (2.098, -1.201, -0.000)),
264
+ ("CZ", 5, (2.794, -0.003, -0.001)),
265
+ ],
266
+ "PRO": [
267
+ ("N", 0, (-0.566, 1.351, -0.000)),
268
+ ("CA", 0, (0.000, 0.000, 0.000)),
269
+ ("C", 0, (1.527, -0.000, 0.000)),
270
+ ("CB", 0, (-0.546, -0.611, -1.293)),
271
+ ("O", 3, (0.621, 1.066, 0.000)),
272
+ ("CG", 4, (0.382, 1.445, 0.0)),
273
+ # ('CD', 5, (0.427, 1.440, 0.0)),
274
+ ("CD", 5, (0.477, 1.424, 0.0)), # manually made angle 2 degrees larger
275
+ ],
276
+ "SER": [
277
+ ("N", 0, (-0.529, 1.360, -0.000)),
278
+ ("CA", 0, (0.000, 0.000, 0.000)),
279
+ ("C", 0, (1.525, -0.000, -0.000)),
280
+ ("CB", 0, (-0.518, -0.777, -1.211)),
281
+ ("O", 3, (0.626, 1.062, -0.000)),
282
+ ("OG", 4, (0.503, 1.325, 0.000)),
283
+ ],
284
+ "THR": [
285
+ ("N", 0, (-0.517, 1.364, 0.000)),
286
+ ("CA", 0, (0.000, 0.000, 0.000)),
287
+ ("C", 0, (1.526, 0.000, -0.000)),
288
+ ("CB", 0, (-0.516, -0.793, -1.215)),
289
+ ("O", 3, (0.626, 1.062, 0.000)),
290
+ ("CG2", 4, (0.550, -0.718, -1.228)),
291
+ ("OG1", 4, (0.472, 1.353, 0.000)),
292
+ ],
293
+ "TRP": [
294
+ ("N", 0, (-0.521, 1.363, 0.000)),
295
+ ("CA", 0, (0.000, 0.000, 0.000)),
296
+ ("C", 0, (1.525, -0.000, 0.000)),
297
+ ("CB", 0, (-0.523, -0.776, -1.212)),
298
+ ("O", 3, (0.627, 1.062, 0.000)),
299
+ ("CG", 4, (0.609, 1.370, -0.000)),
300
+ ("CD1", 5, (0.824, 1.091, 0.000)),
301
+ ("CD2", 5, (0.854, -1.148, -0.005)),
302
+ ("CE2", 5, (2.186, -0.678, -0.007)),
303
+ ("CE3", 5, (0.622, -2.530, -0.007)),
304
+ ("NE1", 5, (2.140, 0.690, -0.004)),
305
+ ("CH2", 5, (3.028, -2.890, -0.013)),
306
+ ("CZ2", 5, (3.283, -1.543, -0.011)),
307
+ ("CZ3", 5, (1.715, -3.389, -0.011)),
308
+ ],
309
+ "TYR": [
310
+ ("N", 0, (-0.522, 1.362, 0.000)),
311
+ ("CA", 0, (0.000, 0.000, 0.000)),
312
+ ("C", 0, (1.524, -0.000, -0.000)),
313
+ ("CB", 0, (-0.522, -0.776, -1.213)),
314
+ ("O", 3, (0.627, 1.062, -0.000)),
315
+ ("CG", 4, (0.607, 1.382, -0.000)),
316
+ ("CD1", 5, (0.716, 1.195, -0.000)),
317
+ ("CD2", 5, (0.713, -1.194, -0.001)),
318
+ ("CE1", 5, (2.107, 1.200, -0.002)),
319
+ ("CE2", 5, (2.104, -1.201, -0.003)),
320
+ ("OH", 5, (4.168, -0.002, -0.005)),
321
+ ("CZ", 5, (2.791, -0.001, -0.003)),
322
+ ],
323
+ "VAL": [
324
+ ("N", 0, (-0.494, 1.373, -0.000)),
325
+ ("CA", 0, (0.000, 0.000, 0.000)),
326
+ ("C", 0, (1.527, -0.000, -0.000)),
327
+ ("CB", 0, (-0.533, -0.795, -1.213)),
328
+ ("O", 3, (0.627, 1.062, -0.000)),
329
+ ("CG1", 4, (0.540, 1.429, -0.000)),
330
+ ("CG2", 4, (0.533, -0.776, 1.203)),
331
+ ],
332
+ }
333
+
334
+ # A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
335
+ residue_atoms: Dict[str, List[str]] = {
336
+ "ALA": ["C", "CA", "CB", "N", "O"],
337
+ "ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"],
338
+ "ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"],
339
+ "ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"],
340
+ "CYS": ["C", "CA", "CB", "N", "O", "SG"],
341
+ "GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"],
342
+ "GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"],
343
+ "GLY": ["C", "CA", "N", "O"],
344
+ "HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"],
345
+ "ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"],
346
+ "LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"],
347
+ "LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"],
348
+ "MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"],
349
+ "PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"],
350
+ "PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"],
351
+ "SER": ["C", "CA", "CB", "N", "O", "OG"],
352
+ "THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"],
353
+ "TRP": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE2", "CE3", "CZ2", "CZ3", "CH2", "N", "NE1", "O"],
354
+ "TYR": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O", "OH"],
355
+ "VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"],
356
+ }
357
+
358
+ # Naming swaps for ambiguous atom names.
359
+ # Due to symmetries in the amino acids the naming of atoms is ambiguous in
360
+ # 4 of the 20 amino acids.
361
+ # (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
362
+ # in LEU, VAL and ARG can be resolved by using the 3d constellations of
363
+ # the 'ambiguous' atoms and their neighbours)
364
+ # TODO: ^ interpret this
365
+ residue_atom_renaming_swaps: Dict[str, Dict[str, str]] = {
366
+ "ASP": {"OD1": "OD2"},
367
+ "GLU": {"OE1": "OE2"},
368
+ "PHE": {"CD1": "CD2", "CE1": "CE2"},
369
+ "TYR": {"CD1": "CD2", "CE1": "CE2"},
370
+ }
371
+
372
+ # Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
373
+ van_der_waals_radius: Dict[str, float] = {
374
+ "C": 1.7,
375
+ "N": 1.55,
376
+ "O": 1.52,
377
+ "S": 1.8,
378
+ }
379
+
380
+ Bond = collections.namedtuple("Bond", ["atom1_name", "atom2_name", "length", "stddev"])
381
+ BondAngle = collections.namedtuple(
382
+ "BondAngle",
383
+ ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"],
384
+ )
385
+
386
+
387
+ def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> list:
388
+ # Maps strings in a nested list structure to their corresponding index in atom_order
389
+ if first_call:
390
+ in_list = copy.deepcopy(in_list)
391
+ for i in range(len(in_list)):
392
+ if isinstance(in_list[i], list):
393
+ in_list[i] = map_structure_with_atom_order(in_list[i], first_call=False)
394
+ elif isinstance(in_list[i], str):
395
+ in_list[i] = atom_order[in_list[i]]
396
+ else:
397
+ raise ValueError("Unexpected type when mapping nested lists!")
398
+ return in_list
399
+
400
+
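For example, the helper above turns nested atom-name lists into nested atom37 indices without mutating its input:

    from transformers.models.esm.openfold_utils import residue_constants as rc

    # "N" -> 0, "CA" -> 1, "CB" -> 3 in atom_order.
    print(rc.map_structure_with_atom_order([["N", "CA"], ["CB"]]))  # [[0, 1], [3]]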
401
+ @functools.lru_cache(maxsize=None)
402
+ def load_stereo_chemical_props() -> (
403
+ Tuple[
404
+ Mapping[str, List[Bond]],
405
+ Mapping[str, List[Bond]],
406
+ Mapping[str, List[BondAngle]],
407
+ ]
408
+ ):
409
+ """Load stereo_chemical_props.txt into a nice structure.
410
+
411
+ Load literature values for bond lengths and bond angles and translate bond angles into the length of the opposite
412
+ edge of the triangle ("residue_virtual_bonds").
413
+
414
+ Returns:
415
+ residue_bonds: dict that maps resname --> list of Bond tuples
416
+ residue_virtual_bonds: dict that maps resname --> list of Bond tuples
+ residue_bond_angles: dict that maps resname --> list of BondAngle tuples
417
+ """
418
+ # TODO: this file should be downloaded in a setup script
419
+ stereo_chemical_props = resources.read_text("openfold.resources", "stereo_chemical_props.txt")
420
+
421
+ lines_iter = iter(stereo_chemical_props.splitlines())
422
+ # Load bond lengths.
423
+ residue_bonds: Dict[str, List[Bond]] = {}
424
+ next(lines_iter) # Skip header line.
425
+ for line in lines_iter:
426
+ if line.strip() == "-":
427
+ break
428
+ bond, resname, bond_length, stddev = line.split()
429
+ atom1, atom2 = bond.split("-")
430
+ if resname not in residue_bonds:
431
+ residue_bonds[resname] = []
432
+ residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev)))
433
+ residue_bonds["UNK"] = []
434
+
435
+ # Load bond angles.
436
+ residue_bond_angles: Dict[str, List[BondAngle]] = {}
437
+ next(lines_iter) # Skip empty line.
438
+ next(lines_iter) # Skip header line.
439
+ for line in lines_iter:
440
+ if line.strip() == "-":
441
+ break
442
+ bond, resname, angle_degree, stddev_degree = line.split()
443
+ atom1, atom2, atom3 = bond.split("-")
444
+ if resname not in residue_bond_angles:
445
+ residue_bond_angles[resname] = []
446
+ residue_bond_angles[resname].append(
447
+ BondAngle(
448
+ atom1,
449
+ atom2,
450
+ atom3,
451
+ float(angle_degree) / 180.0 * np.pi,
452
+ float(stddev_degree) / 180.0 * np.pi,
453
+ )
454
+ )
455
+ residue_bond_angles["UNK"] = []
456
+
457
+ def make_bond_key(atom1_name: str, atom2_name: str) -> str:
458
+ """Unique key to lookup bonds."""
459
+ return "-".join(sorted([atom1_name, atom2_name]))
460
+
461
+ # Translate bond angles into distances ("virtual bonds").
462
+ residue_virtual_bonds: Dict[str, List[Bond]] = {}
463
+ for resname, bond_angles in residue_bond_angles.items():
464
+ # Create a fast lookup dict for bond lengths.
465
+ bond_cache: Dict[str, Bond] = {}
466
+ for b in residue_bonds[resname]:
467
+ bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
468
+ residue_virtual_bonds[resname] = []
469
+ for ba in bond_angles:
470
+ bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
471
+ bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
472
+
473
+ # Compute distance between atom1 and atom3 using the law of cosines
474
+ # c^2 = a^2 + b^2 - 2ab*cos(gamma).
475
+ gamma = ba.angle_rad
476
+ length = np.sqrt(bond1.length**2 + bond2.length**2 - 2 * bond1.length * bond2.length * np.cos(gamma))
477
+
478
+ # Propagation of uncertainty assuming uncorrelated errors.
479
+ dl_outer = 0.5 / length
480
+ dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
481
+ dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
482
+ dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
483
+ stddev = np.sqrt(
484
+ (dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2
485
+ )
486
+ residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev))
487
+
488
+ return (residue_bonds, residue_virtual_bonds, residue_bond_angles)
489
+
490
+
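Written out, the virtual-bond computation inside load_stereo_chemical_props is the law of cosines plus first-order propagation of uncorrelated uncertainties, which is exactly what the dl_* terms above implement:

$$c = \sqrt{a^2 + b^2 - 2ab\cos\gamma}, \qquad
\sigma_c^2 = \left(\frac{ab\sin\gamma}{c}\right)^2 \sigma_\gamma^2
           + \left(\frac{a - b\cos\gamma}{c}\right)^2 \sigma_a^2
           + \left(\frac{b - a\cos\gamma}{c}\right)^2 \sigma_b^2,$$

with $a$, $b$ the two bond lengths, $\gamma$ the bond angle, and $\sigma$ the corresponding standard deviations.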
491
+ # Between-residue bond lengths for general bonds (first element) and for Proline
492
+ # (second element).
493
+ between_res_bond_length_c_n: Tuple[float, float] = (1.329, 1.341)
494
+ between_res_bond_length_stddev_c_n: Tuple[float, float] = (0.014, 0.016)
495
+
496
+ # Between-residue cos_angles.
497
+ between_res_cos_angles_c_n_ca: Tuple[float, float] = (-0.5203, 0.0353) # degrees: 121.352 +- 2.315
498
+ between_res_cos_angles_ca_c_n: Tuple[float, float] = (-0.4473, 0.0311) # degrees: 116.568 +- 1.995
499
+
500
+ # This mapping is used when we need to store atom data in a format that requires
501
+ # fixed atom data size for every residue (e.g. a numpy array).
502
+ atom_types: List[str] = [
503
+ "N",
504
+ "CA",
505
+ "C",
506
+ "CB",
507
+ "O",
508
+ "CG",
509
+ "CG1",
510
+ "CG2",
511
+ "OG",
512
+ "OG1",
513
+ "SG",
514
+ "CD",
515
+ "CD1",
516
+ "CD2",
517
+ "ND1",
518
+ "ND2",
519
+ "OD1",
520
+ "OD2",
521
+ "SD",
522
+ "CE",
523
+ "CE1",
524
+ "CE2",
525
+ "CE3",
526
+ "NE",
527
+ "NE1",
528
+ "NE2",
529
+ "OE1",
530
+ "OE2",
531
+ "CH2",
532
+ "NH1",
533
+ "NH2",
534
+ "OH",
535
+ "CZ",
536
+ "CZ2",
537
+ "CZ3",
538
+ "NZ",
539
+ "OXT",
540
+ ]
541
+ atom_order: Dict[str, int] = {atom_type: i for i, atom_type in enumerate(atom_types)}
542
+ atom_type_num = len(atom_types) # := 37.
543
+
544
+ # A compact atom encoding with 14 columns
545
+ # pylint: disable=line-too-long
546
+ # pylint: disable=bad-whitespace
547
+ restype_name_to_atom14_names: Dict[str, List[str]] = {
548
+ "ALA": ["N", "CA", "C", "O", "CB", "", "", "", "", "", "", "", "", ""],
549
+ "ARG": ["N", "CA", "C", "O", "CB", "CG", "CD", "NE", "CZ", "NH1", "NH2", "", "", ""],
550
+ "ASN": ["N", "CA", "C", "O", "CB", "CG", "OD1", "ND2", "", "", "", "", "", ""],
551
+ "ASP": ["N", "CA", "C", "O", "CB", "CG", "OD1", "OD2", "", "", "", "", "", ""],
552
+ "CYS": ["N", "CA", "C", "O", "CB", "SG", "", "", "", "", "", "", "", ""],
553
+ "GLN": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "NE2", "", "", "", "", ""],
554
+ "GLU": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "OE2", "", "", "", "", ""],
555
+ "GLY": ["N", "CA", "C", "O", "", "", "", "", "", "", "", "", "", ""],
556
+ "HIS": ["N", "CA", "C", "O", "CB", "CG", "ND1", "CD2", "CE1", "NE2", "", "", "", ""],
557
+ "ILE": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "CD1", "", "", "", "", "", ""],
558
+ "LEU": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "", "", "", "", "", ""],
559
+ "LYS": ["N", "CA", "C", "O", "CB", "CG", "CD", "CE", "NZ", "", "", "", "", ""],
560
+ "MET": ["N", "CA", "C", "O", "CB", "CG", "SD", "CE", "", "", "", "", "", ""],
561
+ "PHE": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "", "", ""],
562
+ "PRO": ["N", "CA", "C", "O", "CB", "CG", "CD", "", "", "", "", "", "", ""],
563
+ "SER": ["N", "CA", "C", "O", "CB", "OG", "", "", "", "", "", "", "", ""],
564
+ "THR": ["N", "CA", "C", "O", "CB", "OG1", "CG2", "", "", "", "", "", "", ""],
565
+ "TRP": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "NE1", "CE2", "CE3", "CZ2", "CZ3", "CH2"],
566
+ "TYR": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "OH", "", ""],
567
+ "VAL": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "", "", "", "", "", "", ""],
568
+ "UNK": ["", "", "", "", "", "", "", "", "", "", "", "", "", ""],
569
+ }
570
+ # pylint: enable=line-too-long
571
+ # pylint: enable=bad-whitespace
572
+
573
+
574
+ # This is the standard residue order when coding AA type as a number.
575
+ # Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
576
+ restypes: List[str] = [
577
+ "A",
578
+ "R",
579
+ "N",
580
+ "D",
581
+ "C",
582
+ "Q",
583
+ "E",
584
+ "G",
585
+ "H",
586
+ "I",
587
+ "L",
588
+ "K",
589
+ "M",
590
+ "F",
591
+ "P",
592
+ "S",
593
+ "T",
594
+ "W",
595
+ "Y",
596
+ "V",
597
+ ]
598
+ restype_order: Dict[str, int] = {restype: i for i, restype in enumerate(restypes)}
599
+ restype_num = len(restypes) # := 20.
600
+ unk_restype_index = restype_num # Catch-all index for unknown restypes.
601
+
602
+ restypes_with_x: List[str] = restypes + ["X"]
603
+ restype_order_with_x: Dict[str, int] = {restype: i for i, restype in enumerate(restypes_with_x)}
604
+
605
+
606
+ def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False) -> np.ndarray:
607
+ """Maps the given sequence into a one-hot encoded matrix.
608
+
609
+ Args:
610
+ sequence: An amino acid sequence.
611
+ mapping: A dictionary mapping amino acids to integers.
612
+ map_unknown_to_x: If True, any amino acid that is not in the mapping will be
613
+ mapped to the unknown amino acid 'X'. If the mapping doesn't contain amino acid 'X', an error will be thrown.
614
+ If False, any amino acid not in the mapping will throw an error.
615
+
616
+ Returns:
617
+ A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of the sequence.
618
+
619
+ Raises:
620
+ ValueError: If the mapping doesn't contain values from 0 to
621
+ num_unique_aas - 1 without any gaps.
622
+ """
623
+ num_entries = max(mapping.values()) + 1
624
+
625
+ if sorted(set(mapping.values())) != list(range(num_entries)):
626
+ raise ValueError(
627
+ "The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s"
628
+ % sorted(mapping.values())
629
+ )
630
+
631
+ one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)
632
+
633
+ for aa_index, aa_type in enumerate(sequence):
634
+ if map_unknown_to_x:
635
+ if aa_type.isalpha() and aa_type.isupper():
636
+ aa_id = mapping.get(aa_type, mapping["X"])
637
+ else:
638
+ raise ValueError(f"Invalid character in the sequence: {aa_type}")
639
+ else:
640
+ aa_id = mapping[aa_type]
641
+ one_hot_arr[aa_index, aa_id] = 1
642
+
643
+ return one_hot_arr
644
+
645
+
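A quick example of sequence_to_onehot with the X-aware mapping defined above; 'B' is not a standard restype, so it is mapped to 'X'.

    from transformers.models.esm.openfold_utils import residue_constants as rc

    one_hot = rc.sequence_to_onehot("ACDB", rc.restype_order_with_x, map_unknown_to_x=True)
    print(one_hot.shape)         # (4, 21)
    print(one_hot[-1].argmax())  # 20, the index of 'X'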
646
+ restype_1to3: Dict[str, str] = {
647
+ "A": "ALA",
648
+ "R": "ARG",
649
+ "N": "ASN",
650
+ "D": "ASP",
651
+ "C": "CYS",
652
+ "Q": "GLN",
653
+ "E": "GLU",
654
+ "G": "GLY",
655
+ "H": "HIS",
656
+ "I": "ILE",
657
+ "L": "LEU",
658
+ "K": "LYS",
659
+ "M": "MET",
660
+ "F": "PHE",
661
+ "P": "PRO",
662
+ "S": "SER",
663
+ "T": "THR",
664
+ "W": "TRP",
665
+ "Y": "TYR",
666
+ "V": "VAL",
667
+ }
668
+
669
+
670
+ # NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
671
+ # 1-to-1 mapping of 3 letter names to one letter names. The latter contains
672
+ # many more, and less common, three letter names as keys and maps many of these
673
+ # to the same one letter name (including 'X' and 'U' which we don't use here).
674
+ restype_3to1: Dict[str, str] = {v: k for k, v in restype_1to3.items()}
675
+
676
+ # Define a restype name for all unknown residues.
677
+ unk_restype = "UNK"
678
+
679
+ resnames: List[str] = [restype_1to3[r] for r in restypes] + [unk_restype]
680
+ resname_to_idx: Dict[str, int] = {resname: i for i, resname in enumerate(resnames)}
681
+
682
+
683
+ # The mapping here uses hhblits convention, so that B is mapped to D, J and O
684
+ # are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
685
+ # remaining 20 amino acids are kept in alphabetical order.
686
+ # There are 2 non-amino acid codes, X (representing any amino acid) and
687
+ # "-" representing a missing amino acid in an alignment. The id for these
688
+ # codes is put at the end (20 and 21) so that they can easily be ignored if
689
+ # desired.
690
+ HHBLITS_AA_TO_ID: Dict[str, int] = {
691
+ "A": 0,
692
+ "B": 2,
693
+ "C": 1,
694
+ "D": 2,
695
+ "E": 3,
696
+ "F": 4,
697
+ "G": 5,
698
+ "H": 6,
699
+ "I": 7,
700
+ "J": 20,
701
+ "K": 8,
702
+ "L": 9,
703
+ "M": 10,
704
+ "N": 11,
705
+ "O": 20,
706
+ "P": 12,
707
+ "Q": 13,
708
+ "R": 14,
709
+ "S": 15,
710
+ "T": 16,
711
+ "U": 1,
712
+ "V": 17,
713
+ "W": 18,
714
+ "X": 20,
715
+ "Y": 19,
716
+ "Z": 3,
717
+ "-": 21,
718
+ }
719
+
720
+ # Partial inversion of HHBLITS_AA_TO_ID.
721
+ ID_TO_HHBLITS_AA: Dict[int, str] = {
722
+ 0: "A",
723
+ 1: "C", # Also U.
724
+ 2: "D", # Also B.
725
+ 3: "E", # Also Z.
726
+ 4: "F",
727
+ 5: "G",
728
+ 6: "H",
729
+ 7: "I",
730
+ 8: "K",
731
+ 9: "L",
732
+ 10: "M",
733
+ 11: "N",
734
+ 12: "P",
735
+ 13: "Q",
736
+ 14: "R",
737
+ 15: "S",
738
+ 16: "T",
739
+ 17: "V",
740
+ 18: "W",
741
+ 19: "Y",
742
+ 20: "X", # Includes J and O.
743
+ 21: "-",
744
+ }
745
+
746
+ restypes_with_x_and_gap: List[str] = restypes + ["X", "-"]
747
+ MAP_HHBLITS_AATYPE_TO_OUR_AATYPE: Tuple[int, ...] = tuple(
748
+ restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap))
749
+ )
750
+
751
+
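For instance, remapping an HHblits amino-acid id into this module's residue order:

    from transformers.models.esm.openfold_utils import residue_constants as rc

    hh_id = rc.HHBLITS_AA_TO_ID["C"]                   # 1 in the HHblits convention
    our_id = rc.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE[hh_id]
    print(our_id, rc.restypes_with_x_and_gap[our_id])  # 4 C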
752
+ def _make_standard_atom_mask() -> np.ndarray:
753
+ """Returns [num_res_types, num_atom_types] mask array."""
754
+ # +1 to account for unknown (all 0s).
755
+ mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
756
+ for restype, restype_letter in enumerate(restypes):
757
+ restype_name = restype_1to3[restype_letter]
758
+ atom_names = residue_atoms[restype_name]
759
+ for atom_name in atom_names:
760
+ atom_type = atom_order[atom_name]
761
+ mask[restype, atom_type] = 1
762
+ return mask
763
+
764
+
765
+ STANDARD_ATOM_MASK = _make_standard_atom_mask()
766
+
767
+
768
+ # A one hot representation for the first and second atoms defining the axis
769
+ # of rotation for each chi-angle in each residue.
770
+ def chi_angle_atom(atom_index: int) -> np.ndarray:
771
+ """Define chi-angle rigid groups via one-hot representations."""
772
+ chi_angles_index = {}
773
+ one_hots = []
774
+
775
+ for k, v in chi_angles_atoms.items():
776
+ indices = [atom_types.index(s[atom_index]) for s in v]
777
+ indices.extend([-1] * (4 - len(indices)))
778
+ chi_angles_index[k] = indices
779
+
780
+ for r in restypes:
781
+ res3 = restype_1to3[r]
782
+ one_hot = np.eye(atom_type_num)[chi_angles_index[res3]]
783
+ one_hots.append(one_hot)
784
+
785
+ one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`.
786
+ one_hot = np.stack(one_hots, axis=0)
787
+ one_hot = np.transpose(one_hot, [0, 2, 1])
788
+
789
+ return one_hot
790
+
791
+
792
+ chi_atom_1_one_hot = chi_angle_atom(1)
793
+ chi_atom_2_one_hot = chi_angle_atom(2)
794
+
795
+ # An array like chi_angles_atoms but using indices rather than names.
796
+ chi_angles_atom_indices_list: List[List[List[str]]] = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
797
+ chi_angles_atom_indices_ours: list = map_structure_with_atom_order(chi_angles_atom_indices_list)
798
+ chi_angles_atom_indices = np.array(
799
+ [chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms))) for chi_atoms in chi_angles_atom_indices_ours]
800
+ )
801
+
802
+ # Mapping from (res_name, atom_name) pairs to the atom's chi group index
803
+ # and atom index within that group.
804
+ chi_groups_for_atom: Dict[Tuple[str, str], List[Tuple[int, int]]] = collections.defaultdict(list)
805
+ for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
806
+ for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
807
+ for atom_i, atom in enumerate(chi_group):
808
+ chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
809
+ chi_groups_for_atom = dict(chi_groups_for_atom)
810
+
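As a small illustration of the lookup built above (a sketch, assuming serine's single chi angle is defined by N-CA-CB-OG): OG should appear only in chi group 0, as the fourth atom of that group.

from transformers.models.esm.openfold_utils import residue_constants as rc

# (chi_group_index, atom_index_within_group) pairs for serine's OG atom
assert rc.chi_groups_for_atom[("SER", "OG")] == [(0, 3)]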
811
+
812
+ def _make_rigid_transformation_4x4(ex: np.ndarray, ey: np.ndarray, translation: np.ndarray) -> np.ndarray:
813
+ """Create a rigid 4x4 transformation matrix from two axes and transl."""
814
+ # Normalize ex.
815
+ ex_normalized = ex / np.linalg.norm(ex)
816
+
817
+ # make ey perpendicular to ex
818
+ ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
819
+ ey_normalized /= np.linalg.norm(ey_normalized)
820
+
821
+ # compute ez as cross product
822
+ eznorm = np.cross(ex_normalized, ey_normalized)
823
+ m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
824
+ m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)
825
+ return m
826
+
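A standalone check of the frame builder above (a sketch; _make_rigid_transformation_4x4 is a private helper, so this is for illustration only): the 3x3 block should be orthonormal and the translation should land in the last column.

import numpy as np
from transformers.models.esm.openfold_utils import residue_constants as rc

m = rc._make_rigid_transformation_4x4(
    ex=np.array([1.0, 1.0, 0.0]),
    ey=np.array([0.0, 1.0, 0.0]),
    translation=np.array([2.0, 3.0, 4.0]),
)
rot = m[:3, :3]
assert np.allclose(rot @ rot.T, np.eye(3), atol=1e-6)  # orthonormal rotation block
assert np.allclose(m[:3, 3], [2.0, 3.0, 4.0])          # translation in the last column
assert np.allclose(m[3], [0.0, 0.0, 0.0, 1.0])         # homogeneous bottom row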
827
+
828
+ # create an array with (restype, atomtype) --> rigid_group_idx
829
+ # and an array with (restype, atomtype, coord) for the atom positions
830
+ # and compute affine transformation matrices (4,4) from one rigid group to the
831
+ # previous group
832
+ restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
833
+ restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
834
+ restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
835
+ restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
836
+ restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
837
+ restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
838
+ restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
839
+
840
+
841
+ def _make_rigid_group_constants() -> None:
842
+ """Fill the arrays above."""
843
+ for restype, restype_letter in enumerate(restypes):
844
+ resname = restype_1to3[restype_letter]
845
+ for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]:
846
+ atomtype = atom_order[atomname]
847
+ restype_atom37_to_rigid_group[restype, atomtype] = group_idx
848
+ restype_atom37_mask[restype, atomtype] = 1
849
+ restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position
850
+
851
+ atom14idx = restype_name_to_atom14_names[resname].index(atomname)
852
+ restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
853
+ restype_atom14_mask[restype, atom14idx] = 1
854
+ restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position
855
+
856
+ for restype, restype_letter in enumerate(restypes):
857
+ resname = restype_1to3[restype_letter]
858
+ atom_positions: Dict[str, np.ndarray] = {
859
+ name: np.array(pos) for name, _, pos in rigid_group_atom_positions[resname]
860
+ }
861
+
862
+ # backbone to backbone is the identity transform
863
+ restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
864
+
865
+ # pre-omega-frame to backbone (currently dummy identity matrix)
866
+ restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)
867
+
868
+ # phi-frame to backbone
869
+ mat = _make_rigid_transformation_4x4(
870
+ ex=atom_positions["N"] - atom_positions["CA"],
871
+ ey=np.array([1.0, 0.0, 0.0]),
872
+ translation=atom_positions["N"],
873
+ )
874
+ restype_rigid_group_default_frame[restype, 2, :, :] = mat
875
+
876
+ # psi-frame to backbone
877
+ mat = _make_rigid_transformation_4x4(
878
+ ex=atom_positions["C"] - atom_positions["CA"],
879
+ ey=atom_positions["CA"] - atom_positions["N"],
880
+ translation=atom_positions["C"],
881
+ )
882
+ restype_rigid_group_default_frame[restype, 3, :, :] = mat
883
+
884
+ # chi1-frame to backbone
885
+ if chi_angles_mask[restype][0]:
886
+ base_atom_names = chi_angles_atoms[resname][0]
887
+ base_atom_positions = [atom_positions[name] for name in base_atom_names]
888
+ mat = _make_rigid_transformation_4x4(
889
+ ex=base_atom_positions[2] - base_atom_positions[1],
890
+ ey=base_atom_positions[0] - base_atom_positions[1],
891
+ translation=base_atom_positions[2],
892
+ )
893
+ restype_rigid_group_default_frame[restype, 4, :, :] = mat
894
+
895
+ # chi2-frame to chi1-frame
896
+ # chi3-frame to chi2-frame
897
+ # chi4-frame to chi3-frame
898
+ # luckily all rotation axes for the next frame start at (0,0,0) of the
899
+ # previous frame
900
+ for chi_idx in range(1, 4):
901
+ if chi_angles_mask[restype][chi_idx]:
902
+ axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]
903
+ axis_end_atom_position = atom_positions[axis_end_atom_name]
904
+ mat = _make_rigid_transformation_4x4(
905
+ ex=axis_end_atom_position,
906
+ ey=np.array([-1.0, 0.0, 0.0]),
907
+ translation=axis_end_atom_position,
908
+ )
909
+ restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat
910
+
911
+
912
+ _make_rigid_group_constants()
913
+
914
+
915
+ def make_atom14_dists_bounds(
916
+ overlap_tolerance: float = 1.5,
917
+ bond_length_tolerance_factor: int = 15,
918
+ ) -> Dict[str, np.ndarray]:
919
+ """compute upper and lower bounds for bonds to assess violations."""
920
+ restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
921
+ restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
922
+ restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
923
+ residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
924
+ for restype, restype_letter in enumerate(restypes):
925
+ resname = restype_1to3[restype_letter]
926
+ atom_list = restype_name_to_atom14_names[resname]
927
+
928
+ # create lower and upper bounds for clashes
929
+ for atom1_idx, atom1_name in enumerate(atom_list):
930
+ if not atom1_name:
931
+ continue
932
+ atom1_radius = van_der_waals_radius[atom1_name[0]]
933
+ for atom2_idx, atom2_name in enumerate(atom_list):
934
+ if (not atom2_name) or atom1_idx == atom2_idx:
935
+ continue
936
+ atom2_radius = van_der_waals_radius[atom2_name[0]]
937
+ lower = atom1_radius + atom2_radius - overlap_tolerance
938
+ upper = 1e10
939
+ restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
940
+ restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
941
+ restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
942
+ restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
943
+
944
+ # overwrite lower and upper bounds for bonds and angles
945
+ for b in residue_bonds[resname] + residue_virtual_bonds[resname]:
946
+ atom1_idx = atom_list.index(b.atom1_name)
947
+ atom2_idx = atom_list.index(b.atom2_name)
948
+ lower = b.length - bond_length_tolerance_factor * b.stddev
949
+ upper = b.length + bond_length_tolerance_factor * b.stddev
950
+ restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
951
+ restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
952
+ restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
953
+ restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
954
+ restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev
955
+ restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev
956
+ return {
957
+ "lower_bound": restype_atom14_bond_lower_bound, # shape (21,14,14)
958
+ "upper_bound": restype_atom14_bond_upper_bound, # shape (21,14,14)
959
+ "stddev": restype_atom14_bond_stddev, # shape (21,14,14)
960
+ }
961
+
962
+
963
+ restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32)
964
+ restype_atom14_ambiguous_atoms_swap_idx: np.ndarray = np.tile(np.arange(14, dtype=int), (21, 1))
965
+
966
+
967
+ def _make_atom14_ambiguity_feats() -> None:
968
+ for res, pairs in residue_atom_renaming_swaps.items():
969
+ res_idx = restype_order[restype_3to1[res]]
970
+ for atom1, atom2 in pairs.items():
971
+ atom1_idx = restype_name_to_atom14_names[res].index(atom1)
972
+ atom2_idx = restype_name_to_atom14_names[res].index(atom2)
973
+ restype_atom14_ambiguous_atoms[res_idx, atom1_idx] = 1
974
+ restype_atom14_ambiguous_atoms[res_idx, atom2_idx] = 1
975
+ restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom1_idx] = atom2_idx
976
+ restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom2_idx] = atom1_idx
977
+
978
+
979
+ _make_atom14_ambiguity_feats()
980
+
981
+
982
+ def aatype_to_str_sequence(aatype: Sequence[int]) -> str:
983
+ return "".join([restypes_with_x[aatype[i]] for i in range(len(aatype))])
venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/rigid_utils.py ADDED
@@ -0,0 +1,1242 @@
1
+ # Copyright 2021 AlQuraishi Laboratory
2
+ # Copyright 2021 DeepMind Technologies Limited
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from __future__ import annotations
17
+
18
+ from functools import lru_cache
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+
25
+ def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
26
+ """
27
+ Performs matrix multiplication of two rotation matrix tensors. Written out by hand to avoid AMP downcasting.
28
+
29
+ Args:
30
+ a: [*, 3, 3] left multiplicand
31
+ b: [*, 3, 3] right multiplicand
32
+ Returns:
33
+ The product ab
34
+ """
35
+
36
+ def row_mul(i: int) -> torch.Tensor:
37
+ return torch.stack(
38
+ [
39
+ a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0],
40
+ a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1],
41
+ a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2],
42
+ ],
43
+ dim=-1,
44
+ )
45
+
46
+ return torch.stack(
47
+ [
48
+ row_mul(0),
49
+ row_mul(1),
50
+ row_mul(2),
51
+ ],
52
+ dim=-2,
53
+ )
54
+
55
+
56
+ def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
57
+ """
58
+ Applies a rotation to a vector. Written out by hand to avoid AMP downcasting.
59
+
60
+ Args:
61
+ r: [*, 3, 3] rotation matrices
62
+ t: [*, 3] coordinate tensors
63
+ Returns:
64
+ [*, 3] rotated coordinates
65
+ """
66
+ x, y, z = torch.unbind(t, dim=-1)
67
+ return torch.stack(
68
+ [
69
+ r[..., 0, 0] * x + r[..., 0, 1] * y + r[..., 0, 2] * z,
70
+ r[..., 1, 0] * x + r[..., 1, 1] * y + r[..., 1, 2] * z,
71
+ r[..., 2, 0] * x + r[..., 2, 1] * y + r[..., 2, 2] * z,
72
+ ],
73
+ dim=-1,
74
+ )
75
+
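A minimal equivalence sketch (assuming the module path below): the hand-written products should match torch's batched matmul up to float32 rounding.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import rot_matmul, rot_vec_mul

a = torch.linalg.qr(torch.randn(5, 3, 3)).Q  # batch of orthonormal 3x3 matrices
b = torch.linalg.qr(torch.randn(5, 3, 3)).Q
v = torch.randn(5, 3)

assert torch.allclose(rot_matmul(a, b), a @ b, atol=1e-6)
assert torch.allclose(rot_vec_mul(a, v), (a @ v.unsqueeze(-1)).squeeze(-1), atol=1e-6)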
76
+
77
+ @lru_cache(maxsize=None)
78
+ def identity_rot_mats(
79
+ batch_dims: Tuple[int, ...],
80
+ dtype: Optional[torch.dtype] = None,
81
+ device: Optional[torch.device] = None,
82
+ requires_grad: bool = True,
83
+ ) -> torch.Tensor:
84
+ rots = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad)
85
+ rots = rots.view(*((1,) * len(batch_dims)), 3, 3)
86
+ rots = rots.expand(*batch_dims, -1, -1)
87
+ rots = rots.contiguous()
88
+
89
+ return rots
90
+
91
+
92
+ @lru_cache(maxsize=None)
93
+ def identity_trans(
94
+ batch_dims: Tuple[int, ...],
95
+ dtype: Optional[torch.dtype] = None,
96
+ device: Optional[torch.device] = None,
97
+ requires_grad: bool = True,
98
+ ) -> torch.Tensor:
99
+ trans = torch.zeros((*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad)
100
+ return trans
101
+
102
+
103
+ @lru_cache(maxsize=None)
104
+ def identity_quats(
105
+ batch_dims: Tuple[int, ...],
106
+ dtype: Optional[torch.dtype] = None,
107
+ device: Optional[torch.device] = None,
108
+ requires_grad: bool = True,
109
+ ) -> torch.Tensor:
110
+ quat = torch.zeros((*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad)
111
+
112
+ with torch.no_grad():
113
+ quat[..., 0] = 1
114
+
115
+ return quat
116
+
117
+
118
+ _quat_elements: List[str] = ["a", "b", "c", "d"]
119
+ _qtr_keys: List[str] = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]
120
+ _qtr_ind_dict: Dict[str, int] = {key: ind for ind, key in enumerate(_qtr_keys)}
121
+
122
+
123
+ def _to_mat(pairs: List[Tuple[str, int]]) -> np.ndarray:
124
+ mat = np.zeros((4, 4))
125
+ for key, value in pairs:
126
+ ind = _qtr_ind_dict[key]
127
+ mat[ind // 4][ind % 4] = value
128
+
129
+ return mat
130
+
131
+
132
+ _QTR_MAT = np.zeros((4, 4, 3, 3))
133
+ _QTR_MAT[..., 0, 0] = _to_mat([("aa", 1), ("bb", 1), ("cc", -1), ("dd", -1)])
134
+ _QTR_MAT[..., 0, 1] = _to_mat([("bc", 2), ("ad", -2)])
135
+ _QTR_MAT[..., 0, 2] = _to_mat([("bd", 2), ("ac", 2)])
136
+ _QTR_MAT[..., 1, 0] = _to_mat([("bc", 2), ("ad", 2)])
137
+ _QTR_MAT[..., 1, 1] = _to_mat([("aa", 1), ("bb", -1), ("cc", 1), ("dd", -1)])
138
+ _QTR_MAT[..., 1, 2] = _to_mat([("cd", 2), ("ab", -2)])
139
+ _QTR_MAT[..., 2, 0] = _to_mat([("bd", 2), ("ac", -2)])
140
+ _QTR_MAT[..., 2, 1] = _to_mat([("cd", 2), ("ab", 2)])
141
+ _QTR_MAT[..., 2, 2] = _to_mat([("aa", 1), ("bb", -1), ("cc", -1), ("dd", 1)])
142
+
143
+
144
+ def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
145
+ """
146
+ Converts a quaternion to a rotation matrix.
147
+
148
+ Args:
149
+ quat: [*, 4] quaternions
150
+ Returns:
151
+ [*, 3, 3] rotation matrices
152
+ """
153
+ # [*, 4, 4]
154
+ quat = quat[..., None] * quat[..., None, :]
155
+
156
+ # [4, 4, 3, 3]
157
+ mat = _get_quat("_QTR_MAT", dtype=quat.dtype, device=quat.device)
158
+
159
+ # [*, 4, 4, 3, 3]
160
+ shaped_qtr_mat = mat.view((1,) * len(quat.shape[:-2]) + mat.shape)
161
+ quat = quat[..., None, None] * shaped_qtr_mat
162
+
163
+ # [*, 3, 3]
164
+ return torch.sum(quat, dim=(-3, -4))
165
+
166
+
167
+ def rot_to_quat(rot: torch.Tensor) -> torch.Tensor:
168
+ if rot.shape[-2:] != (3, 3):
169
+ raise ValueError("Input rotation is incorrectly shaped")
170
+
171
+ [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = [[rot[..., i, j] for j in range(3)] for i in range(3)]
172
+
173
+ k = [
174
+ [
175
+ xx + yy + zz,
176
+ zy - yz,
177
+ xz - zx,
178
+ yx - xy,
179
+ ],
180
+ [
181
+ zy - yz,
182
+ xx - yy - zz,
183
+ xy + yx,
184
+ xz + zx,
185
+ ],
186
+ [
187
+ xz - zx,
188
+ xy + yx,
189
+ yy - xx - zz,
190
+ yz + zy,
191
+ ],
192
+ [
193
+ yx - xy,
194
+ xz + zx,
195
+ yz + zy,
196
+ zz - xx - yy,
197
+ ],
198
+ ]
199
+
200
+ _, vectors = torch.linalg.eigh((1.0 / 3.0) * torch.stack([torch.stack(t, dim=-1) for t in k], dim=-2))
201
+ return vectors[..., -1]
202
+
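A round-trip sketch for the two conversions above (illustrative; a quaternion's sign is ambiguous, so the comparison is done in matrix space).

import torch
from transformers.models.esm.openfold_utils.rigid_utils import quat_to_rot, rot_to_quat

identity_q = torch.tensor([1.0, 0.0, 0.0, 0.0])
assert torch.allclose(quat_to_rot(identity_q), torch.eye(3), atol=1e-6)

rot = torch.linalg.qr(torch.randn(3, 3)).Q    # orthonormal, but possibly det = -1
rot = rot * torch.linalg.det(rot).sign()      # force det = +1 so it is a proper rotation
assert torch.allclose(quat_to_rot(rot_to_quat(rot)), rot, atol=1e-4)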
203
+
204
+ _QUAT_MULTIPLY = np.zeros((4, 4, 4))
205
+ _QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]
206
+
207
+ _QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]
208
+
209
+ _QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]]
210
+
211
+ _QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]]
212
+
213
+ _QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]
214
+
215
+ _CACHED_QUATS: Dict[str, np.ndarray] = {
216
+ "_QTR_MAT": _QTR_MAT,
217
+ "_QUAT_MULTIPLY": _QUAT_MULTIPLY,
218
+ "_QUAT_MULTIPLY_BY_VEC": _QUAT_MULTIPLY_BY_VEC,
219
+ }
220
+
221
+
222
+ @lru_cache(maxsize=None)
223
+ def _get_quat(quat_key: str, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
224
+ return torch.tensor(_CACHED_QUATS[quat_key], dtype=dtype, device=device)
225
+
226
+
227
+ def quat_multiply(quat1: torch.Tensor, quat2: torch.Tensor) -> torch.Tensor:
228
+ """Multiply a quaternion by another quaternion."""
229
+ mat = _get_quat("_QUAT_MULTIPLY", dtype=quat1.dtype, device=quat1.device)
230
+ reshaped_mat = mat.view((1,) * len(quat1.shape[:-1]) + mat.shape)
231
+ return torch.sum(reshaped_mat * quat1[..., :, None, None] * quat2[..., None, :, None], dim=(-3, -2))
232
+
233
+
234
+ def quat_multiply_by_vec(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
235
+ """Multiply a quaternion by a pure-vector quaternion."""
236
+ mat = _get_quat("_QUAT_MULTIPLY_BY_VEC", dtype=quat.dtype, device=quat.device)
237
+ reshaped_mat = mat.view((1,) * len(quat.shape[:-1]) + mat.shape)
238
+ return torch.sum(reshaped_mat * quat[..., :, None, None] * vec[..., None, :, None], dim=(-3, -2))
239
+
240
+
241
+ def invert_rot_mat(rot_mat: torch.Tensor) -> torch.Tensor:
242
+ return rot_mat.transpose(-1, -2)
243
+
244
+
245
+ def invert_quat(quat: torch.Tensor) -> torch.Tensor:
246
+ quat_prime = quat.clone()
247
+ quat_prime[..., 1:] *= -1
248
+ inv = quat_prime / torch.sum(quat**2, dim=-1, keepdim=True)
249
+ return inv
250
+
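A quick sketch tying the helpers above together: composing a unit quaternion with its inverse should give the identity quaternion (1, 0, 0, 0).

import torch
from transformers.models.esm.openfold_utils.rigid_utils import invert_quat, quat_multiply

q = torch.randn(8, 4)
q = q / torch.linalg.norm(q, dim=-1, keepdim=True)   # normalize to unit quaternions
identity = torch.tensor([1.0, 0.0, 0.0, 0.0]).expand(8, 4)
assert torch.allclose(quat_multiply(q, invert_quat(q)), identity, atol=1e-5)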
251
+
252
+ class Rotation:
253
+ """
254
+ A 3D rotation. Depending on how the object is initialized, the rotation is represented by either a rotation matrix
255
+ or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the
256
+ underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the
257
+ behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.
258
+ """
259
+
260
+ def __init__(
261
+ self,
262
+ rot_mats: Optional[torch.Tensor] = None,
263
+ quats: Optional[torch.Tensor] = None,
264
+ normalize_quats: bool = True,
265
+ ):
266
+ """
267
+ Args:
268
+ rot_mats:
269
+ A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats
270
+ quats:
271
+ A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit
272
+ quaternion
273
+ normalize_quats:
274
+ If quats is specified, whether to normalize quats
275
+ """
276
+ if (rot_mats is None and quats is None) or (rot_mats is not None and quats is not None):
277
+ raise ValueError("Exactly one input argument must be specified")
278
+
279
+ if (rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or (quats is not None and quats.shape[-1] != 4):
280
+ raise ValueError("Incorrectly shaped rotation matrix or quaternion")
281
+
282
+ # Force full-precision
283
+ if quats is not None:
284
+ quats = quats.to(dtype=torch.float32)
285
+ if rot_mats is not None:
286
+ rot_mats = rot_mats.to(dtype=torch.float32)
287
+
288
+ if quats is not None and normalize_quats:
289
+ quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
290
+
291
+ self._rot_mats = rot_mats
292
+ self._quats = quats
293
+
294
+ @staticmethod
295
+ def identity(
296
+ shape,
297
+ dtype: Optional[torch.dtype] = None,
298
+ device: Optional[torch.device] = None,
299
+ requires_grad: bool = True,
300
+ fmt: str = "quat",
301
+ ) -> Rotation:
302
+ """
303
+ Returns an identity Rotation.
304
+
305
+ Args:
306
+ shape:
307
+ The "shape" of the resulting Rotation object. See documentation for the shape property
308
+ dtype:
309
+ The torch dtype for the rotation
310
+ device:
311
+ The torch device for the new rotation
312
+ requires_grad:
313
+ Whether the underlying tensors in the new rotation object should require gradient computation
314
+ fmt:
315
+ One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
316
+ Returns:
317
+ A new identity rotation
318
+ """
319
+ if fmt == "rot_mat":
320
+ rot_mats = identity_rot_mats(
321
+ shape,
322
+ dtype,
323
+ device,
324
+ requires_grad,
325
+ )
326
+ return Rotation(rot_mats=rot_mats, quats=None)
327
+ elif fmt == "quat":
328
+ quats = identity_quats(shape, dtype, device, requires_grad)
329
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
330
+ else:
331
+ raise ValueError(f"Invalid format: f{fmt}")
332
+
333
+ # Magic methods
334
+
335
+ def __getitem__(self, index: Any) -> Rotation:
336
+ """
337
+ Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape
338
+ property.
339
+
340
+ Args:
341
+ index:
342
+ A torch index. E.g. (1, 3, 2), or (slice(None),)
343
+ Returns:
344
+ The indexed rotation
345
+ """
346
+ if type(index) != tuple:
347
+ index = (index,)
348
+
349
+ if self._rot_mats is not None:
350
+ rot_mats = self._rot_mats[index + (slice(None), slice(None))]
351
+ return Rotation(rot_mats=rot_mats)
352
+ elif self._quats is not None:
353
+ quats = self._quats[index + (slice(None),)]
354
+ return Rotation(quats=quats, normalize_quats=False)
355
+ else:
356
+ raise ValueError("Both rotations are None")
357
+
358
+ def __mul__(self, right: torch.Tensor) -> Rotation:
359
+ """
360
+ Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.
361
+
362
+ Args:
363
+ right:
364
+ The tensor multiplicand
365
+ Returns:
366
+ The product
367
+ """
368
+ if not (isinstance(right, torch.Tensor)):
369
+ raise TypeError("The other multiplicand must be a Tensor")
370
+
371
+ if self._rot_mats is not None:
372
+ rot_mats = self._rot_mats * right[..., None, None]
373
+ return Rotation(rot_mats=rot_mats, quats=None)
374
+ elif self._quats is not None:
375
+ quats = self._quats * right[..., None]
376
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
377
+ else:
378
+ raise ValueError("Both rotations are None")
379
+
380
+ def __rmul__(self, left: torch.Tensor) -> Rotation:
381
+ """
382
+ Reverse pointwise multiplication of the rotation with a tensor.
383
+
384
+ Args:
385
+ left:
386
+ The left multiplicand
387
+ Returns:
388
+ The product
389
+ """
390
+ return self.__mul__(left)
391
+
392
+ # Properties
393
+
394
+ @property
395
+ def shape(self) -> torch.Size:
396
+ """
397
+ Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the
398
+ underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
399
+ tensor, for example, the resulting shape would be [10].
400
+
401
+ Returns:
402
+ The virtual shape of the rotation object
403
+ """
404
+ if self._rot_mats is not None:
405
+ return self._rot_mats.shape[:-2]
406
+ elif self._quats is not None:
407
+ return self._quats.shape[:-1]
408
+ else:
409
+ raise ValueError("Both rotations are None")
410
+
411
+ @property
412
+ def dtype(self) -> torch.dtype:
413
+ """
414
+ Returns the dtype of the underlying rotation.
415
+
416
+ Returns:
417
+ The dtype of the underlying rotation
418
+ """
419
+ if self._rot_mats is not None:
420
+ return self._rot_mats.dtype
421
+ elif self._quats is not None:
422
+ return self._quats.dtype
423
+ else:
424
+ raise ValueError("Both rotations are None")
425
+
426
+ @property
427
+ def device(self) -> torch.device:
428
+ """
429
+ The device of the underlying rotation
430
+
431
+ Returns:
432
+ The device of the underlying rotation
433
+ """
434
+ if self._rot_mats is not None:
435
+ return self._rot_mats.device
436
+ elif self._quats is not None:
437
+ return self._quats.device
438
+ else:
439
+ raise ValueError("Both rotations are None")
440
+
441
+ @property
442
+ def requires_grad(self) -> bool:
443
+ """
444
+ Returns the requires_grad property of the underlying rotation
445
+
446
+ Returns:
447
+ The requires_grad property of the underlying tensor
448
+ """
449
+ if self._rot_mats is not None:
450
+ return self._rot_mats.requires_grad
451
+ elif self._quats is not None:
452
+ return self._quats.requires_grad
453
+ else:
454
+ raise ValueError("Both rotations are None")
455
+
456
+ def get_rot_mats(self) -> torch.Tensor:
457
+ """
458
+ Returns the underlying rotation as a rotation matrix tensor.
459
+
460
+ Returns:
461
+ The rotation as a rotation matrix tensor
462
+ """
463
+ if self._rot_mats is not None:
464
+ return self._rot_mats
465
+ elif self._quats is not None:
466
+ return quat_to_rot(self._quats)
467
+ else:
468
+ raise ValueError("Both rotations are None")
469
+
470
+ def get_quats(self) -> torch.Tensor:
471
+ """
472
+ Returns the underlying rotation as a quaternion tensor.
473
+
474
+ Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.
475
+
476
+ Returns:
477
+ The rotation as a quaternion tensor.
478
+ """
479
+ if self._rot_mats is not None:
480
+ return rot_to_quat(self._rot_mats)
481
+ elif self._quats is not None:
482
+ return self._quats
483
+ else:
484
+ raise ValueError("Both rotations are None")
485
+
486
+ def get_cur_rot(self) -> torch.Tensor:
487
+ """
488
+ Return the underlying rotation in its current form
489
+
490
+ Returns:
491
+ The stored rotation
492
+ """
493
+ if self._rot_mats is not None:
494
+ return self._rot_mats
495
+ elif self._quats is not None:
496
+ return self._quats
497
+ else:
498
+ raise ValueError("Both rotations are None")
499
+
500
+ # Rotation functions
501
+
502
+ def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool = True) -> Rotation:
503
+ """
504
+ Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
505
+ update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the
506
+ desired (not necessarily unit) quaternion update.
507
+
508
+ Args:
509
+ q_update_vec:
510
+ A [*, 3] quaternion update tensor
511
+ normalize_quats:
512
+ Whether to normalize the output quaternion
513
+ Returns:
514
+ An updated Rotation
515
+ """
516
+ quats = self.get_quats()
517
+ new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
518
+ return Rotation(
519
+ rot_mats=None,
520
+ quats=new_quats,
521
+ normalize_quats=normalize_quats,
522
+ )
523
+
524
+ def compose_r(self, r: Rotation) -> Rotation:
525
+ """
526
+ Compose the rotation matrices of the current Rotation object with those of another.
527
+
528
+ Args:
529
+ r:
530
+ An update rotation object
531
+ Returns:
532
+ An updated rotation object
533
+ """
534
+ r1 = self.get_rot_mats()
535
+ r2 = r.get_rot_mats()
536
+ new_rot_mats = rot_matmul(r1, r2)
537
+ return Rotation(rot_mats=new_rot_mats, quats=None)
538
+
539
+ def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:
540
+ """
541
+ Compose the quaternions of the current Rotation object with those of another.
542
+
543
+ Depending on whether either Rotation was initialized with quaternions, this function may call
544
+ torch.linalg.eigh.
545
+
546
+ Args:
547
+ r:
548
+ An update rotation object
549
+ Returns:
550
+ An updated rotation object
551
+ """
552
+ q1 = self.get_quats()
553
+ q2 = r.get_quats()
554
+ new_quats = quat_multiply(q1, q2)
555
+ return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)
556
+
557
+ def apply(self, pts: torch.Tensor) -> torch.Tensor:
558
+ """
559
+ Apply the current Rotation as a rotation matrix to a set of 3D coordinates.
560
+
561
+ Args:
562
+ pts:
563
+ A [*, 3] set of points
564
+ Returns:
565
+ [*, 3] rotated points
566
+ """
567
+ rot_mats = self.get_rot_mats()
568
+ return rot_vec_mul(rot_mats, pts)
569
+
570
+ def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
571
+ """
572
+ The inverse of the apply() method.
573
+
574
+ Args:
575
+ pts:
576
+ A [*, 3] set of points
577
+ Returns:
578
+ [*, 3] inverse-rotated points
579
+ """
580
+ rot_mats = self.get_rot_mats()
581
+ inv_rot_mats = invert_rot_mat(rot_mats)
582
+ return rot_vec_mul(inv_rot_mats, pts)
583
+
584
+ def invert(self) -> Rotation:
585
+ """
586
+ Returns the inverse of the current Rotation.
587
+
588
+ Returns:
589
+ The inverse of the current Rotation
590
+ """
591
+ if self._rot_mats is not None:
592
+ return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
593
+ elif self._quats is not None:
594
+ return Rotation(
595
+ rot_mats=None,
596
+ quats=invert_quat(self._quats),
597
+ normalize_quats=False,
598
+ )
599
+ else:
600
+ raise ValueError("Both rotations are None")
601
+
602
+ # "Tensor" stuff
603
+
604
+ def unsqueeze(self, dim: int) -> Rotation:
605
+ """
606
+ Analogous to torch.unsqueeze. The dimension is relative to the shape of the Rotation object.
607
+
608
+ Args:
609
+ dim: A positive or negative dimension index.
610
+ Returns:
611
+ The unsqueezed Rotation.
612
+ """
613
+ if dim >= len(self.shape):
614
+ raise ValueError("Invalid dimension")
615
+
616
+ if self._rot_mats is not None:
617
+ rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
618
+ return Rotation(rot_mats=rot_mats, quats=None)
619
+ elif self._quats is not None:
620
+ quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
621
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
622
+ else:
623
+ raise ValueError("Both rotations are None")
624
+
625
+ @staticmethod
626
+ def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
627
+ """
628
+ Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().
629
+
630
+ Note that the output of this operation is always a rotation matrix, regardless of the format of input
631
+ rotations.
632
+
633
+ Args:
634
+ rs:
635
+ A list of rotation objects
636
+ dim:
637
+ The dimension along which the rotations should be concatenated
638
+ Returns:
639
+ A concatenated Rotation object in rotation matrix format
640
+ """
641
+ rot_mats = torch.cat(
642
+ [r.get_rot_mats() for r in rs],
643
+ dim=dim if dim >= 0 else dim - 2,
644
+ )
645
+
646
+ return Rotation(rot_mats=rot_mats, quats=None)
647
+
648
+ def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:
649
+ """
650
+ Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s). Can
651
+ be used e.g. to sum out a one-hot batch dimension.
652
+
653
+ Args:
654
+ fn:
655
+ A Tensor -> Tensor function to be mapped over the Rotation
656
+ Returns:
657
+ The transformed Rotation object
658
+ """
659
+ if self._rot_mats is not None:
660
+ rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
661
+ rot_mats = torch.stack(list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1)
662
+ rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))
663
+ return Rotation(rot_mats=rot_mats, quats=None)
664
+ elif self._quats is not None:
665
+ quats = torch.stack(list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1)
666
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
667
+ else:
668
+ raise ValueError("Both rotations are None")
669
+
670
+ def cuda(self) -> Rotation:
671
+ """
672
+ Analogous to the cuda() method of torch Tensors
673
+
674
+ Returns:
675
+ A copy of the Rotation in CUDA memory
676
+ """
677
+ if self._rot_mats is not None:
678
+ return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)
679
+ elif self._quats is not None:
680
+ return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)
681
+ else:
682
+ raise ValueError("Both rotations are None")
683
+
684
+ def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation:
685
+ """
686
+ Analogous to the to() method of torch Tensors
687
+
688
+ Args:
689
+ device:
690
+ A torch device
691
+ dtype:
692
+ A torch dtype
693
+ Returns:
694
+ A copy of the Rotation using the new device and dtype
695
+ """
696
+ if self._rot_mats is not None:
697
+ return Rotation(
698
+ rot_mats=self._rot_mats.to(device=device, dtype=dtype),
699
+ quats=None,
700
+ )
701
+ elif self._quats is not None:
702
+ return Rotation(
703
+ rot_mats=None,
704
+ quats=self._quats.to(device=device, dtype=dtype),
705
+ normalize_quats=False,
706
+ )
707
+ else:
708
+ raise ValueError("Both rotations are None")
709
+
710
+ def detach(self) -> Rotation:
711
+ """
712
+ Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.
713
+
714
+ Returns:
715
+ A copy of the Rotation whose underlying Tensor has been detached from its torch graph
716
+ """
717
+ if self._rot_mats is not None:
718
+ return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
719
+ elif self._quats is not None:
720
+ return Rotation(
721
+ rot_mats=None,
722
+ quats=self._quats.detach(),
723
+ normalize_quats=False,
724
+ )
725
+ else:
726
+ raise ValueError("Both rotations are None")
727
+
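A short usage sketch for the Rotation wrapper defined above (module path assumed): build it from a batch of rotation matrices, rotate some points, and undo the rotation.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rotation

rot_mats = torch.linalg.qr(torch.randn(2, 5, 3, 3)).Q   # batch of orthonormal matrices
rot = Rotation(rot_mats=rot_mats, quats=None)
print(rot.shape)  # torch.Size([2, 5]) -- the batch ("virtual") shape

pts = torch.randn(2, 5, 3)
rotated = rot.apply(pts)
assert torch.allclose(rot.invert_apply(rotated), pts, atol=1e-5)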
728
+
729
+ class Rigid:
730
+ """
731
+ A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
732
+ [*, 3] translation. Designed to behave approximately like a single torch tensor with the shape of the shared batch
733
+ dimensions of its component parts.
734
+ """
735
+
736
+ def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]):
737
+ """
738
+ Args:
739
+ rots: A Rotation object with batch shape [*]
740
+ trans: A corresponding [*, 3] translation tensor
741
+ """
742
+ # (we need device, dtype, etc. from at least one input)
743
+
744
+ batch_dims, dtype, device, requires_grad = None, None, None, None
745
+ if trans is not None:
746
+ batch_dims = trans.shape[:-1]
747
+ dtype = trans.dtype
748
+ device = trans.device
749
+ requires_grad = trans.requires_grad
750
+ elif rots is not None:
751
+ batch_dims = rots.shape
752
+ dtype = rots.dtype
753
+ device = rots.device
754
+ requires_grad = rots.requires_grad
755
+ else:
756
+ raise ValueError("At least one input argument must be specified")
757
+
758
+ if rots is None:
759
+ rots = Rotation.identity(
760
+ batch_dims,
761
+ dtype,
762
+ device,
763
+ requires_grad,
764
+ )
765
+ elif trans is None:
766
+ trans = identity_trans(
767
+ batch_dims,
768
+ dtype,
769
+ device,
770
+ requires_grad,
771
+ )
772
+
773
+ assert rots is not None
774
+ assert trans is not None
775
+
776
+ if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):
777
+ raise ValueError("Rots and trans incompatible")
778
+
779
+ # Force full precision. Happens to the rotations automatically.
780
+ trans = trans.to(dtype=torch.float32)
781
+
782
+ self._rots = rots
783
+ self._trans = trans
784
+
785
+ @staticmethod
786
+ def identity(
787
+ shape: Tuple[int, ...],
788
+ dtype: Optional[torch.dtype] = None,
789
+ device: Optional[torch.device] = None,
790
+ requires_grad: bool = True,
791
+ fmt: str = "quat",
792
+ ) -> Rigid:
793
+ """
794
+ Constructs an identity transformation.
795
+
796
+ Args:
797
+ shape:
798
+ The desired shape
799
+ dtype:
800
+ The dtype of both internal tensors
801
+ device:
802
+ The device of both internal tensors
803
+ requires_grad:
804
+ Whether grad should be enabled for the internal tensors
805
+ Returns:
806
+ The identity transformation
807
+ """
808
+ return Rigid(
809
+ Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
810
+ identity_trans(shape, dtype, device, requires_grad),
811
+ )
812
+
813
+ def __getitem__(self, index: Any) -> Rigid:
814
+ """
815
+ Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of
816
+ both the rotation and the translation.
817
+
818
+ E.g.::
819
+
820
+ r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
+ t = Rigid(r, torch.rand(10, 10, 3))
+ indexed = t[3, 4:6]
+ assert indexed.shape == (2,)
+ assert indexed.get_rots().shape == (2,)
+ assert indexed.get_trans().shape == (2, 3)
823
+
824
+ Args:
825
+ index: A standard torch tensor index. E.g. 8, (10, None, 3),
826
+ or (3, slice(0, 1, None))
827
+ Returns:
828
+ The indexed tensor
829
+ """
830
+ if type(index) != tuple:
831
+ index = (index,)
832
+
833
+ return Rigid(
834
+ self._rots[index],
835
+ self._trans[index + (slice(None),)],
836
+ )
837
+
838
+ def __mul__(self, right: torch.Tensor) -> Rigid:
839
+ """
840
+ Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
841
+
842
+ Args:
843
+ right:
844
+ The tensor multiplicand
845
+ Returns:
846
+ The product
847
+ """
848
+ if not (isinstance(right, torch.Tensor)):
849
+ raise TypeError("The other multiplicand must be a Tensor")
850
+
851
+ new_rots = self._rots * right
852
+ new_trans = self._trans * right[..., None]
853
+
854
+ return Rigid(new_rots, new_trans)
855
+
856
+ def __rmul__(self, left: torch.Tensor) -> Rigid:
857
+ """
858
+ Reverse pointwise multiplication of the transformation with a tensor.
859
+
860
+ Args:
861
+ left:
862
+ The left multiplicand
863
+ Returns:
864
+ The product
865
+ """
866
+ return self.__mul__(left)
867
+
868
+ @property
869
+ def shape(self) -> torch.Size:
870
+ """
871
+ Returns the shape of the shared dimensions of the rotation and the translation.
872
+
873
+ Returns:
874
+ The shape of the transformation
875
+ """
876
+ return self._trans.shape[:-1]
877
+
878
+ @property
879
+ def device(self) -> torch.device:
880
+ """
881
+ Returns the device on which the Rigid's tensors are located.
882
+
883
+ Returns:
884
+ The device on which the Rigid's tensors are located
885
+ """
886
+ return self._trans.device
887
+
888
+ def get_rots(self) -> Rotation:
889
+ """
890
+ Getter for the rotation.
891
+
892
+ Returns:
893
+ The rotation object
894
+ """
895
+ return self._rots
896
+
897
+ def get_trans(self) -> torch.Tensor:
898
+ """
899
+ Getter for the translation.
900
+
901
+ Returns:
902
+ The stored translation
903
+ """
904
+ return self._trans
905
+
906
+ def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
907
+ """
908
+ Composes the transformation with a quaternion update vector of shape [*, 6], where the first three columns
+ represent the x, y, and z values of a quaternion update of the form (1, x, y, z) and the final three columns
+ represent a 3D translation.
910
+
911
+ Args:
912
+ q_update_vec: The quaternion and translation update vector.
913
+ Returns:
914
+ The composed transformation.
915
+ """
916
+ q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
917
+ new_rots = self._rots.compose_q_update_vec(q_vec)
918
+
919
+ trans_update = self._rots.apply(t_vec)
920
+ new_translation = self._trans + trans_update
921
+
922
+ return Rigid(new_rots, new_translation)
923
+
924
+ def compose(self, r: Rigid) -> Rigid:
925
+ """
926
+ Composes the current rigid object with another.
927
+
928
+ Args:
929
+ r:
930
+ Another Rigid object
931
+ Returns:
932
+ The composition of the two transformations
933
+ """
934
+ new_rot = self._rots.compose_r(r._rots)
935
+ new_trans = self._rots.apply(r._trans) + self._trans
936
+ return Rigid(new_rot, new_trans)
937
+
938
+ def apply(self, pts: torch.Tensor) -> torch.Tensor:
939
+ """
940
+ Applies the transformation to a coordinate tensor.
941
+
942
+ Args:
943
+ pts: A [*, 3] coordinate tensor.
944
+ Returns:
945
+ The transformed points.
946
+ """
947
+ rotated = self._rots.apply(pts)
948
+ return rotated + self._trans
949
+
950
+ def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
951
+ """
952
+ Applies the inverse of the transformation to a coordinate tensor.
953
+
954
+ Args:
955
+ pts: A [*, 3] coordinate tensor
956
+ Returns:
957
+ The transformed points.
958
+ """
959
+ pts = pts - self._trans
960
+ return self._rots.invert_apply(pts)
961
+
962
+ def invert(self) -> Rigid:
963
+ """
964
+ Inverts the transformation.
965
+
966
+ Returns:
967
+ The inverse transformation.
968
+ """
969
+ rot_inv = self._rots.invert()
970
+ trn_inv = rot_inv.apply(self._trans)
971
+
972
+ return Rigid(rot_inv, -1 * trn_inv)
973
+
974
+ def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
975
+ """
976
+ Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
977
+ translation/rotation dimensions respectively.
978
+
979
+ Args:
980
+ fn:
981
+ A Tensor -> Tensor function to be mapped over the Rigid
982
+ Returns:
983
+ The transformed Rigid object
984
+ """
985
+ new_rots = self._rots.map_tensor_fn(fn)
986
+ new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)
987
+
988
+ return Rigid(new_rots, new_trans)
989
+
990
+ def to_tensor_4x4(self) -> torch.Tensor:
991
+ """
992
+ Converts a transformation to a homogenous transformation tensor.
993
+
994
+ Returns:
995
+ A [*, 4, 4] homogenous transformation tensor
996
+ """
997
+ tensor = self._trans.new_zeros((*self.shape, 4, 4))
998
+ tensor[..., :3, :3] = self._rots.get_rot_mats()
999
+ tensor[..., :3, 3] = self._trans
1000
+ tensor[..., 3, 3] = 1
1001
+ return tensor
1002
+
1003
+ @staticmethod
1004
+ def from_tensor_4x4(t: torch.Tensor) -> Rigid:
1005
+ """
1006
+ Constructs a transformation from a homogenous transformation tensor.
1007
+
1008
+ Args:
1009
+ t: [*, 4, 4] homogenous transformation tensor
1010
+ Returns:
1011
+ A Rigid object with shape [*]
1012
+ """
1013
+ if t.shape[-2:] != (4, 4):
1014
+ raise ValueError("Incorrectly shaped input tensor")
1015
+
1016
+ rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
1017
+ trans = t[..., :3, 3]
1018
+
1019
+ return Rigid(rots, trans)
1020
+
1021
+ def to_tensor_7(self) -> torch.Tensor:
1022
+ """
1023
+ Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
1024
+ translation.
1025
+
1026
+ Returns:
1027
+ A [*, 7] tensor representation of the transformation
1028
+ """
1029
+ tensor = self._trans.new_zeros((*self.shape, 7))
1030
+ tensor[..., :4] = self._rots.get_quats()
1031
+ tensor[..., 4:] = self._trans
1032
+
1033
+ return tensor
1034
+
1035
+ @staticmethod
1036
+ def from_tensor_7(t: torch.Tensor, normalize_quats: bool = False) -> Rigid:
1037
+ if t.shape[-1] != 7:
1038
+ raise ValueError("Incorrectly shaped input tensor")
1039
+
1040
+ quats, trans = t[..., :4], t[..., 4:]
1041
+
1042
+ rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)
1043
+
1044
+ return Rigid(rots, trans)
1045
+
1046
+ @staticmethod
1047
+ def from_3_points(
1048
+ p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float = 1e-8
1049
+ ) -> Rigid:
1050
+ """
1051
+ Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.
1052
+
1053
+ Args:
1054
+ p_neg_x_axis: [*, 3] coordinates
1055
+ origin: [*, 3] coordinates used as frame origins
1056
+ p_xy_plane: [*, 3] coordinates
1057
+ eps: Small epsilon value
1058
+ Returns:
1059
+ A transformation object of shape [*]
1060
+ """
1061
+ p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)
1062
+ origin_unbound = torch.unbind(origin, dim=-1)
1063
+ p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)
1064
+
1065
+ e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]
1066
+ e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]
1067
+
1068
+ denom = torch.sqrt(sum(c * c for c in e0) + eps * torch.ones_like(e0[0]))
1069
+ e0 = [c / denom for c in e0]
1070
+ dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
1071
+ e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
1072
+ denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0]))
1073
+ e1 = [c / denom for c in e1]
1074
+ e2 = [
1075
+ e0[1] * e1[2] - e0[2] * e1[1],
1076
+ e0[2] * e1[0] - e0[0] * e1[2],
1077
+ e0[0] * e1[1] - e0[1] * e1[0],
1078
+ ]
1079
+
1080
+ rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
1081
+ rots = rots.reshape(rots.shape[:-1] + (3, 3))
1082
+
1083
+ rot_obj = Rotation(rot_mats=rots, quats=None)
1084
+
1085
+ return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))
1086
+
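An illustrative call of the construction above (a sketch with arbitrary coordinates): building a backbone-style frame from N, CA and C positions, with the frame origin at CA.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rigid

n = torch.tensor([[0.0, 1.4, 0.0]])
ca = torch.tensor([[0.0, 0.0, 0.0]])
c = torch.tensor([[1.5, 0.0, 0.0]])

frames = Rigid.from_3_points(p_neg_x_axis=n, origin=ca, p_xy_plane=c)
assert frames.shape == (1,)
assert torch.allclose(frames.get_trans(), ca)  # the frame origin is the CA position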
1087
+ def unsqueeze(self, dim: int) -> Rigid:
1088
+ """
1089
+ Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.
1090
+
1091
+ Args:
1092
+ dim: A positive or negative dimension index.
1093
+ Returns:
1094
+ The unsqueezed transformation.
1095
+ """
1096
+ if dim >= len(self.shape):
1097
+ raise ValueError("Invalid dimension")
1098
+ rots = self._rots.unsqueeze(dim)
1099
+ trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
1100
+
1101
+ return Rigid(rots, trans)
1102
+
1103
+ @staticmethod
1104
+ def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
1105
+ """
1106
+ Concatenates transformations along one of the batch dimensions. Analogous to torch.cat().
1107
+
1108
+ Args:
1109
+ ts:
1110
+ A list of Rigid objects
1111
+ dim:
1112
+ The dimension along which the transformations should be concatenated
1113
+ Returns:
1114
+ A concatenated transformation object
1115
+ """
1116
+ rots = Rotation.cat([t._rots for t in ts], dim)
1117
+ trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1)
1118
+
1119
+ return Rigid(rots, trans)
1120
+
1121
+ def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
1122
+ """
1123
+ Applies a Rotation -> Rotation function to the stored rotation object.
1124
+
1125
+ Args:
1126
+ fn: A function of type Rotation -> Rotation
1127
+ Returns:
1128
+ A transformation object with a transformed rotation.
1129
+ """
1130
+ return Rigid(fn(self._rots), self._trans)
1131
+
1132
+ def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
1133
+ """
1134
+ Applies a Tensor -> Tensor function to the stored translation.
1135
+
1136
+ Args:
1137
+ fn:
1138
+ A function of type Tensor -> Tensor to be applied to the translation
1139
+ Returns:
1140
+ A transformation object with a transformed translation.
1141
+ """
1142
+ return Rigid(self._rots, fn(self._trans))
1143
+
1144
+ def scale_translation(self, trans_scale_factor: float) -> Rigid:
1145
+ """
1146
+ Scales the translation by a constant factor.
1147
+
1148
+ Args:
1149
+ trans_scale_factor:
1150
+ The constant factor
1151
+ Returns:
1152
+ A transformation object with a scaled translation.
1153
+ """
1154
+ return self.apply_trans_fn(lambda t: t * trans_scale_factor)
1155
+
1156
+ def stop_rot_gradient(self) -> Rigid:
1157
+ """
1158
+ Detaches the underlying rotation object
1159
+
1160
+ Returns:
1161
+ A transformation object with detached rotations
1162
+ """
1163
+ return self.apply_rot_fn(lambda r: r.detach())
1164
+
1165
+ @staticmethod
1166
+ def make_transform_from_reference(
1167
+ n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float = 1e-20
1168
+ ) -> Rigid:
1169
+ """
1170
+ Returns a transformation object from reference coordinates.
1171
+
1172
+ Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard
1173
+ way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
1174
+ need to take care of such cases in your code.
1175
+
1176
+ Args:
1177
+ n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
1178
+ ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
1179
+ c_xyz: A [*, 3] tensor of carbon xyz coordinates.
1180
+ Returns:
1181
+ A transformation object. After applying the translation and rotation to the reference backbone, the
1182
+ coordinates will be approximately equal to the input coordinates.
1183
+ """
1184
+ translation = -1 * ca_xyz
1185
+ n_xyz = n_xyz + translation
1186
+ c_xyz = c_xyz + translation
1187
+
1188
+ c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
1189
+ norm = torch.sqrt(eps + c_x**2 + c_y**2)
1190
+ sin_c1 = -c_y / norm
1191
+ cos_c1 = c_x / norm
1192
+
1193
+ c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
1194
+ c1_rots[..., 0, 0] = cos_c1
1195
+ c1_rots[..., 0, 1] = -1 * sin_c1
1196
+ c1_rots[..., 1, 0] = sin_c1
1197
+ c1_rots[..., 1, 1] = cos_c1
1198
+ c1_rots[..., 2, 2] = 1
1199
+
1200
+ norm = torch.sqrt(eps + c_x**2 + c_y**2 + c_z**2)
1201
+ sin_c2 = c_z / norm
1202
+ cos_c2 = torch.sqrt(c_x**2 + c_y**2) / norm
1203
+
1204
+ c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
1205
+ c2_rots[..., 0, 0] = cos_c2
1206
+ c2_rots[..., 0, 2] = sin_c2
1207
+ c2_rots[..., 1, 1] = 1
1208
+ c2_rots[..., 2, 0] = -1 * sin_c2
1209
+ c2_rots[..., 2, 2] = cos_c2
1210
+
1211
+ c_rots = rot_matmul(c2_rots, c1_rots)
1212
+ n_xyz = rot_vec_mul(c_rots, n_xyz)
1213
+
1214
+ _, n_y, n_z = [n_xyz[..., i] for i in range(3)]
1215
+ norm = torch.sqrt(eps + n_y**2 + n_z**2)
1216
+ sin_n = -n_z / norm
1217
+ cos_n = n_y / norm
1218
+
1219
+ n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
1220
+ n_rots[..., 0, 0] = 1
1221
+ n_rots[..., 1, 1] = cos_n
1222
+ n_rots[..., 1, 2] = -1 * sin_n
1223
+ n_rots[..., 2, 1] = sin_n
1224
+ n_rots[..., 2, 2] = cos_n
1225
+
1226
+ rots = rot_matmul(n_rots, c_rots)
1227
+
1228
+ rots = rots.transpose(-1, -2)
1229
+ translation = -1 * translation
1230
+
1231
+ rot_obj = Rotation(rot_mats=rots, quats=None)
1232
+
1233
+ return Rigid(rot_obj, translation)
1234
+
1235
+ def cuda(self) -> Rigid:
1236
+ """
1237
+ Moves the transformation object to GPU memory
1238
+
1239
+ Returns:
1240
+ A version of the transformation on GPU
1241
+ """
1242
+ return Rigid(self._rots.cuda(), self._trans.cuda())
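A closing usage sketch for Rigid (illustrative; module path assumed): build transforms from homogeneous 4x4 matrices, then check that composing a transform with its inverse gives the identity.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rigid

t4x4 = torch.eye(4).expand(6, 4, 4).clone()
t4x4[..., :3, 3] = torch.randn(6, 3)   # identity rotation plus a random translation
rigid = Rigid.from_tensor_4x4(t4x4)

round_trip = rigid.compose(rigid.invert()).to_tensor_4x4()
assert torch.allclose(round_trip, torch.eye(4).expand(6, 4, 4), atol=1e-5)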