diff --git a/ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4aec8f959f37042c5318df71db027fa2ab004e8e
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e89bf2e55e35ed0f05fbec26ef376f030a1b946484f40f2c9c0c827f184f1889
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..10dcd0cd7abdab1aaa863a7aab1b26276367bf5d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbc680c6b7aa23968e96f914f4e462357b4fb538d4a269593c227c67d4f4bd1c
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b307a24d9ddf0ff85371fa94b6627939fade1412
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:808d89a48271846ad48d88b3d8c563a03d2650c4ba42a020a2b2e9e35f2294d1
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3781e3e20f55b9575df917709b9a3156cf8174f6
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be0cdd8a27794a9b0901b27c872baccf9e5ff24b3ac7505f9b3235390c31e9ea
+size 16778396
diff --git a/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..76f12ee0f072aaec11767939d961f89ef3b2c6bc
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f05953525977728553a39a506e566c6611353f64cb94b6053cd02e152cef9964
+size 16778411
diff --git a/ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f08cd2a0dd850c6042f264dab4a66b9adbdf6375
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6c7ae9a86ef8973269e9560eb05c8eba5ea41f355259ba690fc2ac7c0efb0c2
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f87bfdea532d61d4bc63802eced65f108328e666
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_autoformer": [
+ "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "AutoformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_autoformer"] = [
+ "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "AutoformerForPrediction",
+ "AutoformerModel",
+ "AutoformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_autoformer import (
+ AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ AutoformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_autoformer import (
+ AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ AutoformerForPrediction,
+ AutoformerModel,
+ AutoformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
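The `_LazyModule` indirection above keeps importing the package cheap: a submodule is only actually imported the first time one of its attributes is accessed, and the modeling symbols are registered only when `is_torch_available()` returns `True`. A minimal usage sketch (assuming the `transformers` package vendored in this venv and a working PyTorch install):

```python
# Resolved lazily: the configuration module is imported on first attribute access.
from transformers.models.autoformer import AutoformerConfig

config = AutoformerConfig()
print(config.model_type)  # "autoformer"

# Only registered when torch is available; otherwise the
# OptionalDependencyNotAvailable branch above leaves it out of _import_structure.
from transformers.models.autoformer import AutoformerModel

model = AutoformerModel(config)  # randomly initialized weights
```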
diff --git a/venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cda294147bf8a9496494ed83152303c6ac786bf1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py b/venv/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..11909ac5c38c4c487fc28e84e53d863c93563c30
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py
@@ -0,0 +1,245 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Autoformer model configuration"""
+
+from typing import List, Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class AutoformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
+ Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Autoformer
+ [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
+ architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ prediction_length (`int`):
+ The prediction length for the decoder. In other words, the prediction horizon of the model.
+ context_length (`int`, *optional*, defaults to `prediction_length`):
+ The context length for the encoder. If unset, the context length will be the same as the
+ `prediction_length`.
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
+ loss (`string`, *optional*, defaults to `"nll"`):
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
+ distributions it is the negative log likelihood (nll) - which currently is the only supported one.
+ input_size (`int`, *optional*, defaults to 1):
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
+ multivariate targets.
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+ The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
+ 5, 6, 7]`.
+        scaling (`bool`, *optional*, defaults to `True`):
+ Whether to scale the input targets.
+ num_time_features (`int`, *optional*, defaults to 0):
+ The number of time features in the input time series.
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
+ The number of dynamic real valued features.
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
+ The number of static categorical features.
+ num_static_real_features (`int`, *optional*, defaults to 0):
+ The number of static real valued features.
+ cardinality (`list[int]`, *optional*):
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ embedding_dimension (`list[int]`, *optional*):
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ d_model (`int`, *optional*, defaults to 64):
+ Dimensionality of the transformer layers.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 2):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+ `"relu"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the encoder, and decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each encoder layer.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each decoder layer.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability used between the two layers of the feed-forward networks.
+ num_parallel_samples (`int`, *optional*, defaults to 100):
+ The number of samples to generate in parallel for each time step of inference.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated normal weight initialization distribution.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+ label_length (`int`, *optional*, defaults to 10):
+ Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
+ non-autoregressive generation).
+        moving_average (`int`, *optional*, defaults to 25):
+            The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
+            Layer.
+        autocorrelation_factor (`int`, *optional*, defaults to 3):
+            "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find the top k autocorrelation delays.
+            It's recommended in the paper to set it to a number between 1 and 5.
+
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoformerConfig, AutoformerModel
+
+ >>> # Initializing a default Autoformer configuration
+ >>> configuration = AutoformerConfig()
+
+ >>> # Randomly initializing a model (with random weights) from the configuration
+ >>> model = AutoformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "autoformer"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ "num_hidden_layers": "encoder_layers",
+ }
+
+ def __init__(
+ self,
+ prediction_length: Optional[int] = None,
+ context_length: Optional[int] = None,
+ distribution_output: str = "student_t",
+ loss: str = "nll",
+ input_size: int = 1,
+ lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
+ scaling: bool = True,
+ num_time_features: int = 0,
+ num_dynamic_real_features: int = 0,
+ num_static_categorical_features: int = 0,
+ num_static_real_features: int = 0,
+ cardinality: Optional[List[int]] = None,
+ embedding_dimension: Optional[List[int]] = None,
+ d_model: int = 64,
+ encoder_attention_heads: int = 2,
+ decoder_attention_heads: int = 2,
+ encoder_layers: int = 2,
+ decoder_layers: int = 2,
+ encoder_ffn_dim: int = 32,
+ decoder_ffn_dim: int = 32,
+ activation_function: str = "gelu",
+ dropout: float = 0.1,
+ encoder_layerdrop: float = 0.1,
+ decoder_layerdrop: float = 0.1,
+ attention_dropout: float = 0.1,
+ activation_dropout: float = 0.1,
+ num_parallel_samples: int = 100,
+ init_std: float = 0.02,
+ use_cache: bool = True,
+ is_encoder_decoder=True,
+ # Autoformer arguments
+ label_length: int = 10,
+ moving_average: int = 25,
+ autocorrelation_factor: int = 3,
+ **kwargs,
+ ):
+ # time series specific configuration
+ self.prediction_length = prediction_length
+ self.context_length = context_length if context_length is not None else prediction_length
+ self.distribution_output = distribution_output
+ self.loss = loss
+ self.input_size = input_size
+ self.num_time_features = num_time_features
+ self.lags_sequence = lags_sequence
+ self.scaling = scaling
+ self.num_dynamic_real_features = num_dynamic_real_features
+ self.num_static_real_features = num_static_real_features
+ self.num_static_categorical_features = num_static_categorical_features
+ if cardinality is not None and num_static_categorical_features > 0:
+ if len(cardinality) != num_static_categorical_features:
+ raise ValueError(
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.cardinality = cardinality
+ else:
+ self.cardinality = [0]
+ if embedding_dimension is not None and num_static_categorical_features > 0:
+ if len(embedding_dimension) != num_static_categorical_features:
+ raise ValueError(
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.embedding_dimension = embedding_dimension
+ else:
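+            # default heuristic: embedding size grows with the cardinality, capped at 50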
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+ self.num_parallel_samples = num_parallel_samples
+
+ # Transformer architecture configuration
+ self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
+ self.d_model = d_model
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_attention_heads = decoder_attention_heads
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.decoder_layers = decoder_layers
+
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+
+ self.activation_function = activation_function
+ self.init_std = init_std
+
+ self.use_cache = use_cache
+
+ # Autoformer
+ self.label_length = label_length
+ self.moving_average = moving_average
+ self.autocorrelation_factor = autocorrelation_factor
+
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def _number_of_features(self) -> int:
+ return (
+ sum(self.embedding_dimension)
+ + self.num_dynamic_real_features
+ + self.num_time_features
+ + self.num_static_real_features
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py b/venv/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a993fad32785f14f051332655cc9c11fd12d24a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py
@@ -0,0 +1,2155 @@
+# coding=utf-8
+# Copyright (c) 2021 THUML @ Tsinghua University
+# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Autoformer model."""
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ ModelOutput,
+ SampleTSPredictionOutput,
+ Seq2SeqTSPredictionOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_autoformer import AutoformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "AutoformerConfig"
+
+
+@dataclass
+class AutoFormerDecoderOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Trend tensor for each time series.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ trend: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class AutoformerModelOutput(ModelOutput):
+ """
+ Autoformer model output that contains the additional trend output.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Trend tensor for each time series.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+            Shift values of each time series' context window, which are used to give the model inputs of the same
+            magnitude and then used to shift back to the original magnitude.
+        scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+            Scaling values of each time series' context window, which are used to give the model inputs of the same
+            magnitude and then used to rescale back to the original magnitude.
+        static_features (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*):
+            Static features of each time series in a batch which are copied to the covariates at inference time.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ trend: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loc: Optional[torch.FloatTensor] = None
+ scale: Optional[torch.FloatTensor] = None
+ static_features: Optional[torch.FloatTensor] = None
+
+
+from ..deprecated._archive_maps import AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Autoformer
+class AutoformerFeatureEmbedder(nn.Module):
+ """
+ Embed a sequence of categorical features.
+
+ Args:
+ cardinalities (`list[int]`):
+ List of cardinalities of the categorical features.
+ embedding_dims (`list[int]`):
+ List of embedding dimensions of the categorical features.
+ """
+
+ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
+ super().__init__()
+
+ self.num_features = len(cardinalities)
+ self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ if self.num_features > 1:
+ # we slice the last dimension, giving an array of length
+ # self.num_features with shape (N,T) or (N)
+ cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
+ else:
+ cat_feature_slices = [features]
+
+ return torch.cat(
+ [
+ embed(cat_feature_slice.squeeze(-1))
+ for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
+ ],
+ dim=-1,
+ )
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerStdScaler(nn.Module):
+ """
+    Standardizes features along the first dimension by subtracting the mean and dividing by the standard deviation,
+    both computed over the observed values.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Input data for which the location and scale are computed.
+            observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Indicates which entries of `data` were observed; only these contribute to the statistics.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
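+        # the clamp below avoids division by zero when no values are observed in a window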
+ denominator = denominator.clamp_min(1.0)
+ loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
+
+ variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
+ scale = torch.sqrt(variance + self.minimum_scale)
+ return (data - loc) / scale, loc, scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerMeanScaler(nn.Module):
+ """
+ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
+ accordingly.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
+ self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Input data for which the scale is computed.
+            observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Indicates which entries of `data` were observed; only these contribute to the scale.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
+ num_observed = observed_indicator.sum(self.dim, keepdim=True)
+
+ scale = ts_sum / torch.clamp(num_observed, min=1)
+
+ # If `default_scale` is provided, we use it, otherwise we use the scale
+ # of the batch.
+ if self.default_scale is None:
+ batch_sum = ts_sum.sum(dim=0)
+ batch_observations = torch.clamp(num_observed.sum(0), min=1)
+ default_scale = torch.squeeze(batch_sum / batch_observations)
+ else:
+ default_scale = self.default_scale * torch.ones_like(scale)
+
+ # apply default scale where there are no observations
+ scale = torch.where(num_observed > 0, scale, default_scale)
+
+ # ensure the scale is at least `self.minimum_scale`
+ scale = torch.clamp(scale, min=self.minimum_scale)
+ scaled_data = data / scale
+
+ if not self.keepdim:
+ scale = scale.squeeze(dim=self.dim)
+
+ return scaled_data, torch.zeros_like(scale), scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerNOPScaler(nn.Module):
+ """
+ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor = None
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                Input data, which is returned unchanged.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ return data, loc, scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
+def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
+ """
+ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
+ meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
+
+ Args:
+ input_tensor (`torch.FloatTensor`):
+ Input tensor, of which the average must be computed.
+ weights (`torch.FloatTensor`, *optional*):
+ Weights tensor, of the same shape as `input_tensor`.
+ dim (`int`, *optional*):
+ The dim along which to average `input_tensor`.
+
+ Returns:
+ `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
+ """
+ if weights is not None:
+ weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
+ sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
+ return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
+ else:
+ return input_tensor.mean(dim=dim)
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
+def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
+ """
+ Computes the negative log likelihood loss from input distribution with respect to target.
+ """
+ return -input.log_prob(target)
+
+
+# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer
+class AutoformerSinusoidalPositionalEmbedding(nn.Embedding):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+ super().__init__(num_positions, embedding_dim)
+ self.weight = self._init_weight(self.weight)
+
+ @staticmethod
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
+ """
+ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
+ the 2nd half of the vector. [dim // 2:]
+ """
+ n_pos, dim = out.shape
+ position_enc = np.array(
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+ )
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+ return out
+
+ @torch.no_grad()
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Autoformer
+class AutoformerValueEmbedding(nn.Module):
+ def __init__(self, feature_size, d_model):
+ super().__init__()
+ self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
+
+ def forward(self, x):
+ return self.value_projection(x)
+
+
+# Class based on
+# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L39
+# where AutoformerSeriesDecompositionLayer is series_decomp + moving_average
+class AutoformerSeriesDecompositionLayer(nn.Module):
+ """
+ Returns the trend and the seasonal parts of the time series. Calculated as:
+
+ x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.kernel_size = config.moving_average
+ self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0)
+
+ def forward(self, x):
+ """Input shape: Batch x Time x EMBED_DIM"""
+        # padding on both ends of the time series
+ num_of_pads = (self.kernel_size - 1) // 2
+ front = x[:, 0:1, :].repeat(1, num_of_pads, 1)
+ end = x[:, -1:, :].repeat(1, num_of_pads, 1)
+ x_padded = torch.cat([front, x, end], dim=1)
+
+ # calculate the trend and seasonal part of the series
+ x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)
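+        # AvgPool1d averages over the last dimension, hence the permutes to (batch, channels, time) and back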
+ x_seasonal = x - x_trend
+ return x_seasonal, x_trend
+
+
+# Class based on
+# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L6
+# where AutoformerLayernorm is my_Layernorm
+class AutoformerLayernorm(nn.Module):
+ """
+    Specially designed layer normalization for the seasonal part, calculated as: AutoformerLayernorm(x) = nn.LayerNorm(x)
+ - torch.mean(nn.LayerNorm(x))
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.layernorm = nn.LayerNorm(config.d_model)
+
+ def forward(self, x):
+ x_hat = self.layernorm(x)
+ bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
+ return x_hat - bias
+
+
+class AutoformerAttention(nn.Module):
+ """
+ AutoCorrelation Mechanism with the following two phases:
+ (1) period-based dependencies discovery (2) time delay aggregation
+    This block replaces the canonical self-attention mechanism.
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ autocorrelation_factor: int = 3,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ self.autocorrelation_factor = autocorrelation_factor
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # (1) period-based dependencies discovery
+ # Resize (truncation or zero filling)
+ queries_time_length = query_states.size(1)
+ values_time_length = value_states.size(1)
+ if queries_time_length > values_time_length:
+ query_states = query_states[:, : (queries_time_length - values_time_length), :]
+ zeros = torch.zeros_like(query_states).float()
+ value_states = torch.cat([value_states, zeros], dim=1)
+ key_states = torch.cat([key_states, zeros], dim=1)
+ else:
+ value_states = value_states[:, :queries_time_length, :]
+ key_states = key_states[:, :queries_time_length, :]
+
+ query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1)
+ key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1)
+ attn_weights = query_states_fft * torch.conj(key_states_fft)
+ attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) # Autocorrelation(Q,K)
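+        # the inverse FFT of Q_fft * conj(K_fft) is the circular cross-correlation of queries and keys along the time axis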
+
+ src_len = key_states.size(1)
+ channel = key_states.size(2)
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel)
+ else:
+ attn_weights_reshaped = None
+
+ # time delay aggregation
+ time_length = value_states.size(1)
+ autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+
+        # find top k autocorrelation delays
+ top_k = int(self.autocorrelation_factor * math.log(time_length))
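+        # following the paper, only c * log(length) delays are kept (c = autocorrelation_factor, recommended 1-5)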
+ autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len
+ if self.training:
+ autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)
+ _, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)
+ top_k_autocorrelations = torch.stack(
+ [autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1
+ )
+ else:
+ top_k_autocorrelations, top_k_delays_index = torch.topk(
+ autocorrelations_mean_on_head_channel, top_k, dim=1
+ )
+
+ top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k
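+        # the softmax turns the selected autocorrelation scores into aggregation weights over the top-k delays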
+
+ # compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay)
+ if not self.training:
+ # used for compute values_states.roll(delay) in inference
+ tmp_values = value_states.repeat(1, 2, 1)
+ init_index = (
+ torch.arange(time_length)
+ .view(1, -1, 1)
+ .repeat(bsz * self.num_heads, 1, channel)
+ .to(value_states.device)
+ )
+
+ delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel
+ for i in range(top_k):
+ # compute value_states roll delay
+ if not self.training:
+ tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(
+ self.num_heads, tgt_len, channel
+ )
+ value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)
+ else:
+ value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)
+
+ # aggregation
+ top_k_autocorrelations_at_delay = (
+ top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
+ )
+ delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay
+
+ attn_output = delays_agg.contiguous()
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class AutoformerEncoderLayer(nn.Module):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = AutoformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = AutoformerLayernorm(config)
+ self.decomp1 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp2 = AutoformerSeriesDecompositionLayer(config)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ # added layer norm here as an improvement
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, _ = self.decomp1(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, _ = self.decomp2(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
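+        # clamp extreme fp16 activations so that inf values do not propagate to later layers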
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class AutoformerDecoderLayer(nn.Module):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = AutoformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = AutoformerAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = AutoformerLayernorm(config)
+
+ self.decomp1 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp2 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp3 = AutoformerSeriesDecompositionLayer(config)
+
+ # source: https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/layers/Autoformer_EncDec.py#L128
+ self.trend_projection = nn.Conv1d(
+ in_channels=self.embed_dim,
+ out_channels=config.feature_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ padding_mode="circular",
+ bias=False,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+            use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the `present_key_value` state to be used for subsequent
+ decoding.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend1 = self.decomp1(hidden_states)
+ # added layer norm here as an improvement
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend2 = self.decomp2(hidden_states)
+ # added layer norm here as an improvement
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend3 = self.decomp3(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if encoder_hidden_states is not None:
+ residual_trend = trend1 + trend2 + trend3
+ else:
+ residual_trend = trend1 + trend3
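+        # project the accumulated trend from d_model back to feature_size (circular Conv1d over the time axis)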
+ residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)
+ outputs = ((hidden_states, residual_trend),)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class AutoformerPreTrainedModel(PreTrainedModel):
+ config_class = AutoformerConfig
+ base_model_prefix = "model"
+ main_input_name = "past_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, AutoformerSinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+AUTOFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+ heads, etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`AutoformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+AUTOFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Past values of the time series, that serve as context in order to predict the future. These values may
+ contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
+ `static_categorical_features`, `static_real_features`, `past_time_features`).
+
+ The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
+
+ Missing values need to be replaced with zeros.
+
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
+ Optional time features, which the model internally will add to `past_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+ Transformer requires these additional time features to be provided.
+
+ The Autoformer only learns additional embeddings for `static_categorical_features`.
+
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to the
+ values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static categorical feature is a time series ID.
+
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+
+ future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
+ Future values of the time series, that serve as labels for the model. The `future_values` is what the
+ Transformer needs to learn to output, given the `past_values`.
+
+ See the demo notebook and code snippets for details.
+
+ Missing values need to be replaced with zeros.
+
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
+ Optional time features, which the model internally will add to `future_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+ Transformer requires these additional features to be provided.
+
+ The Autoformer only learns additional embeddings for `static_categorical_features`.
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to
+ make sure the model can only look at previous inputs in order to predict the future.
+
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consisting of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+ at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoder with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerEncoder(AutoformerPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`AutoformerEncoderLayer`].
+
+ Args:
+ config: AutoformerConfig
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size())
+
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class AutoformerDecoder(AutoformerPreTrainedModel):
+ """
+ Transformer decoder consisting of `config.decoder_layers` layers. Each layer is a [`AutoformerDecoderLayer`]
+
+ Args:
+ config: AutoformerConfig
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([AutoformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ # https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/models/Autoformer.py#L74
+ self.seasonality_projection = nn.Linear(config.d_model, config.feature_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ trend: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, AutoFormerDecoderOutput]:
+ r"""
+ Args:
+ trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):
+ The trend sequence to be fed to the decoder.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up
+ decoding (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = inputs_embeds.size()[:-1]
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ hidden_states = self.value_embedding(inputs_embeds)
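+ # the decoder input covers the last `label_length` context steps plus the prediction window, so its
+ # positional encodings start at offset `context_length - label_length` within the full context + prediction range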
+ embed_pos = self.embed_positions(
+ inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length
+ )
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ (hidden_states, residual_trend) = layer_outputs[0]
+ trend = trend + residual_trend
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # project seasonality representation
+ hidden_states = self.seasonality_projection(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, trend, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return AutoFormerDecoderOutput(
+ last_hidden_state=hidden_states,
+ trend=trend,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Autoformer Model outputting raw hidden-states without any specific head on top.",
+ AUTOFORMER_START_DOCSTRING,
+)
+class AutoformerModel(AutoformerPreTrainedModel):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+
+ if config.scaling == "mean" or config.scaling is True:
+ self.scaler = AutoformerMeanScaler(config)
+ elif config.scaling == "std":
+ self.scaler = AutoformerStdScaler(config)
+ else:
+ self.scaler = AutoformerNOPScaler(config)
+
+ if config.num_static_categorical_features > 0:
+ self.embedder = AutoformerFeatureEmbedder(
+ cardinalities=config.cardinality, embedding_dims=config.embedding_dimension
+ )
+
+ # transformer encoder-decoder and mask initializer
+ self.encoder = AutoformerEncoder(config)
+ self.decoder = AutoformerDecoder(config)
+
+ # used for decoder seasonal and trend initialization
+ self.decomposition_layer = AutoformerSeriesDecompositionLayer(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def _past_length(self) -> int:
+ return self.config.context_length + max(self.config.lags_sequence)
+
+ def get_lagged_subsequences(
+ self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
+ ) -> torch.Tensor:
+ """
+ Returns lagged subsequences of a given sequence, as a tensor of shape `(batch_size, subsequences_length,
+ feature_size, indices_length)`. Specifically, `lagged[i, j, :, k] = sequence[i, -indices[k] - subsequences_length + j, :]`.
+
+ Args:
+ sequence (`torch.Tensor` of shape `(batch_size, context_length, feature_size)`):
+ The sequence from which lagged subsequences should be extracted.
+ subsequences_length (`int`):
+ Length of the subsequences to be extracted.
+ shift (`int`, *optional*, defaults to 0):
+ Shift the lags by this amount back in the time index.
+ """
+
+ # calculates the indices of the lags by subtracting the shift value from the given lags_sequence
+ indices = [lag - shift for lag in self.config.lags_sequence]
+
+ # checks if the maximum lag plus the length of the subsequences exceeds the length of the input sequence
+ sequence_length = sequence.shape[1]
+ if max(indices) + subsequences_length > sequence_length:
+ raise ValueError(
+ f"lags cannot go further than history length, found lag {max(indices)} "
+ f"while history length is only {sequence_length}"
+ )
+
+ # extracts the lagged subsequences from the input sequence using the calculated indices
+ lagged_values = []
+ for lag_index in indices:
+ begin_index = -lag_index - subsequences_length
+ end_index = -lag_index if lag_index > 0 else None
+ lagged_values.append(sequence[:, begin_index:end_index, ...])
+
+ # return as stacked tensor in the feature dimension
+ return torch.stack(lagged_values, dim=-1)
+
+ def create_network_inputs(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Creates the inputs for the network given the past and future values, time features, and static features.
+
+ Args:
+ past_values (`torch.Tensor`):
+ A tensor of shape `(batch_size, past_length, input_size)` containing the past values.
+ past_time_features (`torch.Tensor`):
+ A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.
+ static_categorical_features (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical
+ features.
+ static_real_features (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.
+ past_observed_mask (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed
+ values in the past.
+ future_values (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.
+
+ Returns:
+ A tuple containing the following tensors:
+ - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *
+ input_size)` containing the lagged subsequences of the inputs.
+ - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the
+ concatenated static and time features.
+ - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input
+ values.
+ - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input
+ values.
+ - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the
+ concatenated static features.
+ """
+ # time feature
+ time_feat = (
+ torch.cat(
+ (
+ past_time_features[:, self._past_length - self.config.context_length :, ...],
+ future_time_features,
+ ),
+ dim=1,
+ )
+ if future_values is not None
+ else past_time_features[:, self._past_length - self.config.context_length :, ...]
+ )
+
+ # target
+ if past_observed_mask is None:
+ past_observed_mask = torch.ones_like(past_values)
+
+ context = past_values[:, -self.config.context_length :]
+ observed_context = past_observed_mask[:, -self.config.context_length :]
+ _, loc, scale = self.scaler(context, observed_context)
+
+ inputs = (
+ (torch.cat((past_values, future_values), dim=1) - loc) / scale
+ if future_values is not None
+ else (past_values - loc) / scale
+ )
+
+ # static features
+ log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
+ log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
+ static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
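+ # the scaler statistics (log1p of |loc| and log of scale) are exposed to the network as extra static real features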
+
+ if static_real_features is not None:
+ static_feat = torch.cat((static_real_features, static_feat), dim=1)
+ if static_categorical_features is not None:
+ embedded_cat = self.embedder(static_categorical_features)
+ static_feat = torch.cat((embedded_cat, static_feat), dim=1)
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
+
+ # all features
+ features = torch.cat((expanded_static_feat, time_feat), dim=-1)
+
+ # lagged features
+ subsequences_length = (
+ self.config.context_length + self.config.prediction_length
+ if future_values is not None
+ else self.config.context_length
+ )
+ lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
+ raise ValueError(
+ f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match"
+ )
+ return reshaped_lagged_sequence, features, loc, scale, static_feat
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=AutoformerModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[AutoformerModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerModel
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_inputs, temporal_features, loc, scale, static_feat = self.create_network_inputs(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ )
+
+ if encoder_outputs is None:
+ enc_input = torch.cat(
+ (
+ transformer_inputs[:, : self.config.context_length, ...],
+ temporal_features[:, : self.config.context_length, ...],
+ ),
+ dim=-1,
+ )
+ encoder_outputs = self.encoder(
+ inputs_embeds=enc_input,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ if future_values is not None:
+ # Decoder inputs
+ # seasonality and trend from context length
+ seasonal_input, trend_input = self.decomposition_layer(
+ transformer_inputs[:, : self.config.context_length, ...]
+ )
+ mean = (
+ torch.mean(transformer_inputs[:, : self.config.context_length, ...], dim=1)
+ .unsqueeze(1)
+ .repeat(1, self.config.prediction_length, 1)
+ )
+ zeros = torch.zeros(
+ [transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]],
+ device=enc_input.device,
+ )
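+ # the prediction window part of the decoder input is initialized with zeros for the seasonal branch
+ # and with the context mean for the trend branch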
+
+ decoder_input = torch.cat(
+ (
+ torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),
+ temporal_features[:, self.config.context_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ trend_init = torch.cat(
+ (
+ torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),
+ temporal_features[:, self.config.context_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+
+ decoder_outputs = self.decoder(
+ trend=trend_init,
+ inputs_embeds=decoder_input,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ else:
+ decoder_outputs = AutoFormerDecoderOutput()
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
+
+ return AutoformerModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ trend=decoder_outputs.trend,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ loc=loc,
+ scale=scale,
+ static_features=static_feat,
+ )
+
+
+@add_start_docstrings(
+ "The Autoformer Model with a distribution head on top for time-series forecasting.",
+ AUTOFORMER_START_DOCSTRING,
+)
+class AutoformerForPrediction(AutoformerPreTrainedModel):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+ self.model = AutoformerModel(config)
+ if config.distribution_output == "student_t":
+ self.distribution_output = StudentTOutput(dim=config.input_size)
+ elif config.distribution_output == "normal":
+ self.distribution_output = NormalOutput(dim=config.input_size)
+ elif config.distribution_output == "negative_binomial":
+ self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
+ else:
+ raise ValueError(f"Unknown distribution output {config.distribution_output}")
+
+ self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.feature_size)
+ self.target_shape = self.distribution_output.event_shape
+
+ if config.loss == "nll":
+ self.loss = nll
+ else:
+ raise ValueError(f"Unknown loss function {config.loss}")
+
+ # Initialize weights of distribution_output and apply final processing
+ self.post_init()
+
+ def output_params(self, decoder_output):
+ return self.parameter_projection(decoder_output[:, -self.config.prediction_length :, :])
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ @torch.jit.ignore
+ def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
+ sliced_params = params
+ if trailing_n is not None:
+ sliced_params = [p[:, -trailing_n:] for p in params]
+ return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
+
+ @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSPredictionOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ future_observed_mask: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSPredictionOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # during inference, one only provides past values
+ >>> # as well as possible additional features
+ >>> # the model autoregressively generates future values
+ >>> outputs = model.generate(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> mean_prediction = outputs.sequences.mean(dim=1)
+ ```
+
+ The AutoformerForPrediction can also use static_real_features. To do so, set num_static_real_features in
+ AutoformerConfig based on the number of such features in the dataset (for the tourism_monthly dataset it
+ is equal to 1), initialize the model and call it as shown below:
+
+ ```
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerConfig, AutoformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> # check number of static real features
+ >>> num_static_real_features = batch["static_real_features"].shape[-1]
+
+ >>> # load configuration of pretrained model and override num_static_real_features
+ >>> configuration = AutoformerConfig.from_pretrained(
+ ... "huggingface/autoformer-tourism-monthly",
+ ... num_static_real_features=num_static_real_features,
+ ... )
+ >>> # we also need to update feature_size as it is not recalculated
+ >>> configuration.feature_size += num_static_real_features
+
+ >>> model = AutoformerForPrediction(configuration)
+
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+ ```
+
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if future_values is not None:
+ use_cache = False
+
+ outputs = self.model(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ prediction_loss = None
+ params = None
+ if future_values is not None:
+ # outputs.last_hidden_state and trend
+ # loc is 4th last and scale is 3rd last output
+ params = self.output_params(outputs[0] + outputs[1])
+ distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
+
+ loss = self.loss(distribution, future_values)
+
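+ # unobserved future steps get zero weight when averaging the per-step negative log-likelihood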
+ if future_observed_mask is None:
+ future_observed_mask = torch.ones_like(future_values)
+
+ if len(self.target_shape) == 0:
+ loss_weights = future_observed_mask
+ else:
+ loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
+
+ prediction_loss = weighted_average(loss, weights=loss_weights)
+
+ if not return_dict:
+ outputs = ((params,) + outputs[2:]) if params is not None else outputs[2:]
+ return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
+
+ return Seq2SeqTSPredictionOutput(
+ loss=prediction_loss,
+ params=params,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loc=outputs.loc,
+ scale=outputs.scale,
+ static_features=outputs.static_features,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ future_time_features: torch.Tensor,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> SampleTSPredictionOutput:
+ r"""
+ Greedily generate sequences of sample predictions from a model with a probability distribution head.
+
+ Parameters:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, that serve as context in order to predict the future. The sequence size
+ of this tensor must be larger than the `context_length` of the model, since the model will use the
+ larger size to construct lag features, i.e. additional values from the past which are added in order to
+ serve as "extra context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
+ no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
+ of the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features,
+ such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
+ of variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things
+ like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
+ These could also be so-called "age" features, which basically help the model know "at which point in
+ life" a time-series is. Age features have small values for distant past time steps and increase
+ monotonically the more we approach the current time step. Holiday features are also a good example of
+ time features.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires these additional time features to be provided. The Time Series Transformer
+ only learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to sampled
+ predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
+ (for instance as Fourier features). These could also be so-called "age" features, which basically help
+ the model know "at which point in life" a time-series is. Age features have small values for distant
+ past time steps and increase monotonically the more we approach the current time step. Holiday features
+ are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So, contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires these additional time features to be provided. The Time Series Transformer
+ only learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to
+ the values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over
+ time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers.
+
+ Return:
+ [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
+ samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
+ multivariate predictions.
+ """
+ outputs = self(
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ past_time_features=past_time_features,
+ past_values=past_values,
+ past_observed_mask=past_observed_mask,
+ future_time_features=None,
+ future_values=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ use_cache=False,
+ )
+
+ decoder = self.model.get_decoder()
+ enc_last_hidden = outputs.encoder_last_hidden_state
+ loc = outputs.loc
+ scale = outputs.scale
+ static_feat = outputs.static_features
+
+ num_parallel_samples = self.config.num_parallel_samples
+ repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
+ repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_past_values = (
+ past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
+ ) / repeated_scale
+
+ time_features = torch.cat((past_time_features, future_time_features), dim=1)
+
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1)
+ features = torch.cat((expanded_static_feat, time_features), dim=-1)
+ repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ lagged_sequence = self.model.get_lagged_subsequences(
+ sequence=repeated_past_values, subsequences_length=self.config.context_length
+ )
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+ seasonal_input, trend_input = self.model.decomposition_layer(reshaped_lagged_sequence)
+
+ mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1)
+ zeros = torch.zeros(
+ [reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]],
+ device=reshaped_lagged_sequence.device,
+ )
+
+ decoder_input = torch.cat(
+ (
+ torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),
+ repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ trend_init = torch.cat(
+ (
+ torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),
+ repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ decoder_outputs = decoder(
+ trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden
+ )
+ decoder_last_hidden = decoder_outputs.last_hidden_state
+ trend = decoder_outputs.trend
+ params = self.output_params(decoder_last_hidden + trend)
+ distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
+ future_samples = distr.sample()
+
+ return SampleTSPredictionOutput(
+ sequences=future_samples.reshape(
+ (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
+ )
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c20d7370c6566c7046797508eeff6036b3350f57
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
+
+
+_import_structure = {}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
+
+if TYPE_CHECKING:
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_bartpho import BartphoTokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..667127040ee5404ef939f8f566455bcdd37e19d9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2dc6be9a789d0072436025b433058516ca3a3e5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bartpho/__pycache__/tokenization_bartpho.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py b/venv/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py
new file mode 100644
index 0000000000000000000000000000000000000000..d936be41c2c78628d0bc9bb71bed7314e3e8707f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bartpho/tokenization_bartpho.py
@@ -0,0 +1,314 @@
+# coding=utf-8
+# Copyright 2021 VinAI Research and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for BARTpho-syllable model."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
+
+
+class BartphoTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the
+ multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.
+ monolingual_vocab_file (`str`):
+ Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized
+ types extracted from the multilingual vocabulary vocab_file of 250K types.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+            The token used is the `sep_token`.
+
+            </Tip>
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+            sequence classification or for a text and a question for question answering. It is also used as the last
+            token of a sequence built with special tokens.
+        cls_token (`str`, *optional*, defaults to `"<s>"`):
+            The classifier token which is used when doing sequence classification (classification of the whole sequence
+            instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+            The token used for masking values. This is the token used when training this model with masked language
+            modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
+                using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
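+
+    Example (a minimal usage sketch; the `vinai/bartpho-syllable` checkpoint name is given for
+    illustration only and loading it requires the `sentencepiece` dependency):
+
+    ```python
+    >>> from transformers import BartphoTokenizer
+
+    >>> tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")  # doctest: +SKIP
+    >>> tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]  # doctest: +SKIP
+    ```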
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ monolingual_vocab_file,
+        bos_token="<s>",
+        eos_token="</s>",
+        sep_token="</s>",
+        cls_token="<s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+        mask_token="<mask>",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+        # The mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.vocab_file = vocab_file
+ self.monolingual_vocab_file = monolingual_vocab_file
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+
+ # Load the reduced vocab
+
+ # Keep order of special tokens for backward compatibility
+ self.fairseq_tokens_to_ids = {}
+ cnt = 0
+ for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
+ if str(token) not in self.fairseq_tokens_to_ids:
+ self.fairseq_tokens_to_ids[str(token)] = cnt
+ cnt += 1
+ with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
+ for line in f.readlines():
+ token = line.strip().split()[0]
+ self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
+ if str(mask_token) not in self.fairseq_tokens_to_ids:
+ self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
+
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+        adding special tokens. A BARTPho sequence has the following format:
+
+        - single sequence: `<s> X </s>`
+        - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.fairseq_ids_to_tokens)
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ else:
+ return self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.fairseq_ids_to_tokens[index]
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ out_monolingual_vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
+ out_monolingual_vocab_file
+ ) and os.path.isfile(self.monolingual_vocab_file):
+ copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
+ elif not os.path.isfile(self.monolingual_vocab_file):
+ with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
+ for token in self.fairseq_tokens_to_ids:
+ if token not in self.all_special_tokens:
+ fp.write(f"{str(token)} \n")
+
+ return out_vocab_file, out_monolingual_vocab_file
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..693a544c4b3d3fe238a6ebd106a3235ee32e4fea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__init__.py
@@ -0,0 +1,51 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_dbrx": ["DbrxConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_dbrx"] = [
+ "DbrxForCausalLM",
+ "DbrxModel",
+ "DbrxPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_dbrx import DbrxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cd806499ff495437bc0b0eb008b468af38c3819
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac4ce39f1ccd2414a4fb18fa17c0bcc36e8aa307
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/configuration_dbrx.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..663b8c9ba4cd28c3698ac684f3e74eb6e76f246c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dbrx/__pycache__/modeling_dbrx.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py b/venv/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py
new file mode 100644
index 0000000000000000000000000000000000000000..b03d2c17b09e0787fc09ce3fbe0d1d54b44801a3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dbrx/configuration_dbrx.py
@@ -0,0 +1,257 @@
+# coding=utf-8
+# Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DBRX model configuration """
+
+from typing import Any, Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class DbrxAttentionConfig(PretrainedConfig):
+ """Configuration class for Dbrx Attention.
+
+ [`DbrxAttention`] class. It is used to instantiate attention layers
+ according to the specified arguments, defining the layers architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability for the attention layers.
+ clip_qkv (`float`, *optional*):
+ If set, clip the queries, keys, and values in the attention layer to this value.
+        kv_n_heads (`Optional[int]`, defaults to 1): For grouped_query_attention only, allows the user to specify the number of kv heads.
+ rope_theta (`float`, defaults to 10000.0): The base frequency for rope.
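+
+    Example (a minimal sketch showing how these fields are typically supplied through `DbrxConfig`;
+    the values are illustrative):
+
+    ```python
+    >>> from transformers import DbrxConfig
+
+    >>> config = DbrxConfig(attn_config={"kv_n_heads": 2, "clip_qkv": 8.0})
+    >>> config.attn_config.kv_n_heads
+    2
+    ```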
+ """
+
+ def __init__(
+ self,
+ attn_pdrop: float = 0.0,
+ clip_qkv: Optional[float] = None,
+ kv_n_heads: int = 1,
+ rope_theta: float = 10000.0,
+ **kwargs: Any,
+ ):
+ super().__init__(**kwargs)
+ self.attn_pdrop = attn_pdrop
+ self.clip_qkv = clip_qkv
+ self.kv_n_heads = kv_n_heads
+ self.rope_theta = rope_theta
+
+ for k in ["model_type"]:
+ if k in kwargs:
+ kwargs.pop(k)
+ if len(kwargs) != 0:
+ raise ValueError(f"Found unknown {kwargs=}")
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ if config_dict.get("model_type") == "dbrx":
+ config_dict = config_dict["attn_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class DbrxFFNConfig(PretrainedConfig):
+ """Configuration class for Dbrx FFN.
+
+ [`DbrxFFN`] class. It is used to instantiate feedforward layers according to
+ the specified arguments, defining the layers architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
+ The dict should have a key 'name' with the value being the name of the activation function along with
+ any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
+ ffn_hidden_size (`int`, defaults to 3584): The hidden size of the feedforward network.
+ moe_num_experts (`int`, defaults to 4): The number of experts in the mixture of experts layer.
+ moe_top_k (`int`, defaults to 1): The number of experts to use in the mixture of experts layer.
+ moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
+ moe_loss_weight (`float`, defaults to 0.01): The loss weight for the mixture of experts layer.
+ moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
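+
+    Example (a minimal sketch; the values are illustrative):
+
+    ```python
+    >>> from transformers import DbrxConfig
+
+    >>> config = DbrxConfig(ffn_config={"moe_num_experts": 8, "moe_top_k": 2})
+    >>> config.ffn_config.moe_top_k
+    2
+    ```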
+ """
+
+ def __init__(
+ self,
+ ffn_act_fn: dict = None,
+ ffn_hidden_size: int = 3584,
+ moe_num_experts: int = 4,
+ moe_top_k: int = 1,
+ moe_jitter_eps: Optional[float] = None,
+ moe_loss_weight: float = 0.01,
+ moe_normalize_expert_weights: Optional[float] = 1.0,
+ **kwargs: Any,
+ ):
+ super().__init__()
+ if ffn_act_fn is None:
+ ffn_act_fn = {"name": "silu"}
+ self.ffn_act_fn = ffn_act_fn
+ self.ffn_hidden_size = ffn_hidden_size
+ self.moe_num_experts = moe_num_experts
+ self.moe_top_k = moe_top_k
+ self.moe_jitter_eps = moe_jitter_eps
+ self.moe_loss_weight = moe_loss_weight
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
+
+ for k in ["model_type"]:
+ if k in kwargs:
+ kwargs.pop(k)
+ if len(kwargs) != 0:
+ raise ValueError(f"Found unknown {kwargs=}")
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ if config_dict.get("model_type") == "dbrx":
+ config_dict = config_dict["ffn_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class DbrxConfig(PretrainedConfig):
+ r"""
+
+ This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
+ specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a different configuration to that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ d_model (`int`, *optional*, defaults to 2048):
+ Dimensionality of the embeddings and hidden states.
+ n_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ n_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ max_seq_len (`int`, *optional*, defaults to 2048):
+ The maximum sequence length of the model.
+ vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
+            the `input_ids` passed when calling [`DbrxModel`].
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability applied to the attention output before combining with residual.
+ emb_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability for the embedding layer.
+ attn_config (`dict`, *optional*):
+ A dictionary used to configure the model's attention module.
+ ffn_config (`dict`, *optional*):
+ A dictionary used to configure the model's FFN module.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not the router logits should be returned by the model. Enabling this will also
+ allow the model to output the auxiliary loss. See [here]() for more details.
+
+
+ Example:
+ ```python
+ >>> from transformers import DbrxConfig, DbrxModel
+
+ >>> # Initializing a Dbrx configuration
+ >>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = DbrxModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "dbrx"
+ attribute_map = {
+ "num_attention_heads": "n_heads",
+ "hidden_size": "d_model",
+ "num_hidden_layers": "n_layers",
+ "max_position_embeddings": "max_seq_len",
+ }
+
+ def __init__(
+ self,
+ d_model: int = 2048,
+ n_heads: int = 16,
+ n_layers: int = 24,
+ max_seq_len: int = 2048,
+ vocab_size: int = 32000,
+ resid_pdrop: float = 0.0,
+ emb_pdrop: float = 0.0,
+ attn_config: Optional[DbrxAttentionConfig] = None,
+ ffn_config: Optional[DbrxFFNConfig] = None,
+ use_cache: bool = True,
+ initializer_range: float = 0.02,
+ output_router_logits: bool = False,
+ **kwargs: Any,
+ ):
+ if attn_config is None:
+ self.attn_config = DbrxAttentionConfig()
+ elif isinstance(attn_config, dict):
+ self.attn_config = DbrxAttentionConfig(**attn_config)
+ else:
+ self.attn_config = attn_config
+
+ if ffn_config is None:
+ self.ffn_config = DbrxFFNConfig()
+ elif isinstance(ffn_config, dict):
+ self.ffn_config = DbrxFFNConfig(**ffn_config)
+ else:
+ self.ffn_config = ffn_config
+
+ self.d_model = d_model
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.max_seq_len = max_seq_len
+ self.vocab_size = vocab_size
+ self.resid_pdrop = resid_pdrop
+ self.emb_pdrop = emb_pdrop
+ self.use_cache = use_cache
+ self.initializer_range = initializer_range
+ self.output_router_logits = output_router_logits
+
+ tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
+ if tie_word_embeddings:
+ raise ValueError("tie_word_embeddings is not supported for DBRX models.")
+
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py b/venv/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py
new file mode 100644
index 0000000000000000000000000000000000000000..99b865c773f81da489d546863832febae0388ab6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dbrx/modeling_dbrx.py
@@ -0,0 +1,1523 @@
+# coding=utf-8
+# Copyright 2024 Databricks Mosaic Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DBRX model. """
+
+import math
+from typing import Any, Dict, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, StaticCache
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_dbrx import DbrxConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "DbrxConfig"
+
+
+# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with Gemma->Dbrx
+class DbrxRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ self.register_buffer("inv_freq", None, persistent=False)
+
+ @torch.no_grad()
+ def forward(self, x, position_ids, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if self.inv_freq is None:
+ self.inv_freq = 1.0 / (
+ self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
+ )
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 since bfloat16 loses precision on long contexts
+ # See https://github.com/huggingface/transformers/pull/29285
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
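+
+    Example (shape check only; the zero tensors below are illustrative):
+
+    ```python
+    >>> import torch
+
+    >>> q = k = torch.zeros(1, 8, 16, 64)  # [batch, heads, seq_len, head_dim]
+    >>> cos = sin = torch.zeros(1, 16, 64)  # [batch, seq_len, head_dim]
+    >>> apply_rotary_pos_emb(q, k, cos, sin)[0].shape
+    torch.Size([1, 8, 16, 64])
+    ```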
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
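+
+    Example (shape check only; the zero tensor below is illustrative):
+
+    ```python
+    >>> import torch
+
+    >>> kv = torch.zeros(2, 4, 16, 8)  # [batch, num_key_value_heads, seqlen, head_dim]
+    >>> repeat_kv(kv, n_rep=3).shape
+    torch.Size([2, 12, 16, 8])
+    ```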
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+def load_balancing_loss_func(
+ gate_logits: torch.Tensor,
+ num_experts: int,
+ top_k: int,
+ attention_mask: Optional[torch.Tensor],
+) -> torch.Tensor:
+ r"""Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]):
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+ shape [batch_size X sequence_length, num_experts].
+ num_experts (`int`):
+ Number of experts.
+ top_k (`int`):
+ The number of experts each token is routed to.
+ attention_mask (`torch.Tensor`, None):
+ The attention_mask used in forward function
+ shape [batch_size X sequence_length] if not None.
+
+ Returns:
+ The auxiliary loss.
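+
+    Example (illustrative only: random logits for 2 layers, 8 tokens and 4 experts):
+
+    ```python
+    >>> import torch
+
+    >>> gate_logits = tuple(torch.randn(8, 4) for _ in range(2))
+    >>> load_balancing_loss_func(gate_logits, num_experts=4, top_k=1, attention_mask=None).shape
+    torch.Size([])
+    ```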
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return torch.tensor(0.0)
+
+ if isinstance(gate_logits, tuple):
+ compute_device = gate_logits[0].device
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+ if attention_mask is None:
+ # Compute the percentage of tokens routed to each experts
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+ else:
+ batch_size, sequence_length = attention_mask.shape
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
+ expert_attention_mask = (
+ attention_mask[None, :, :, None, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+ .reshape(-1, top_k, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the percentage of tokens routed to each experts
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+ expert_attention_mask, dim=0
+ )
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
+ router_per_expert_attention_mask = (
+ attention_mask[None, :, :, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+ .reshape(-1, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+ router_per_expert_attention_mask, dim=0
+ )
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+ return overall_loss * num_experts
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+class DbrxAttention(nn.Module):
+ """Multi-head self attention."""
+
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.d_model
+ self.num_heads = config.n_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.max_position_embeddings = config.max_seq_len
+ self.block_idx = block_idx
+ if block_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `block_idx` is not recommended and will "
+ + "lead to errors during the forward call if caching is used. Please make sure to provide a `block_idx` "
+ + "when creating this class."
+ )
+
+ attn_config = config.attn_config
+ self.attn_pdrop = attn_config.attn_pdrop
+ self.clip_qkv = attn_config.clip_qkv
+ self.num_key_value_heads = attn_config.kv_n_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.rope_theta = attn_config.rope_theta
+ self.is_causal = True
+
+ self.Wqkv = nn.Linear(
+ self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False
+ )
+ self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+ self.rotary_emb = DbrxRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Any,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ qkv_states = self.Wqkv(hidden_states)
+        if self.clip_qkv is not None:
+            qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
+
+ query_states, key_states, value_states = qkv_states.split(
+ [
+ self.hidden_size,
+ self.num_key_value_heads * self.head_dim,
+ self.num_key_value_heads * self.head_dim,
+ ],
+ dim=2,
+ )
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
+
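+        # Grouped-query attention: repeat the key/value heads so that each query head has a
+        # matching key/value head before computing the attention scores.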
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attention_mask is not None: # no matter the length, we just slice it
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attn_pdrop, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ + f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class DbrxFlashAttention2(DbrxAttention):
+ """Dbrx flash attention module.
+
+ This module inherits from `DbrxAttention` as the weights of the module stays
+ untouched. The only required change would be on the forward pass where it
+ calls the public API of flash attention.
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ # From: https://github.com/huggingface/transformers/blob/3b8e2932ce743008f63585aae1e1b8b30dc8b3ac/src/transformers/models/gemma/modeling_gemma.py#L318
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Any,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ logger.info("Implicitly setting `output_attentions` to False as it is not supported in Flash Attention.")
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ qkv_states = self.Wqkv(hidden_states)
+ if self.clip_qkv is not None:
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
+
+ query_states, key_states, value_states = qkv_states.split(
+ [
+ self.hidden_size,
+ self.num_key_value_heads * self.head_dim,
+ self.num_key_value_heads * self.head_dim,
+ ],
+ dim=2,
+ )
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x head_dim x hidden_dim
+ # therefore we just need to keep the original shape
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout
+ # [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attn_pdrop if self.training else 0.0
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in the correct dtype just to be sure everything works as expected.
+        # This might slow down training & inference so it is recommended to not cast the LayerNorms
+ # in fp32. (LlamaRMSNorm handles it correctly)
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+                target_dtype = self.Wqkv.weight.dtype
+
+ logger.warning_once(
+ "The input hidden states seems to be silently casted in float32, this might be "
+ + "related to the fact you have upcasted embedding or layer norm layers in "
+ + f"float32. We will cast back the input in {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=dropout_rate,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
+ first unpad the input, then computes the attention scores and pad the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class DbrxSdpaAttention(DbrxAttention):
+ """
+ Dbrx attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `DbrxAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
+ SDPA API.
+ """
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "DbrxModel is using DbrxSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ qkv_states = self.Wqkv(hidden_states)
+ if self.clip_qkv is not None:
+ qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
+
+ query_states, key_states, value_states = qkv_states.split(
+ [
+ self.hidden_size,
+ self.num_key_value_heads * self.head_dim,
+ self.num_key_value_heads * self.head_dim,
+ ],
+ dim=2,
+ )
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids, seq_len=None)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, None)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ causal_mask = attention_mask
+ if attention_mask is not None:
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and causal_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=causal_mask,
+ dropout_p=self.attn_pdrop if self.training else 0.0,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, -1)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+DBRX_ATTENTION_CLASSES = {
+ "eager": DbrxAttention,
+ "flash_attention_2": DbrxFlashAttention2,
+ "sdpa": DbrxSdpaAttention,
+}
+
+
+class DbrxNormAttentionNorm(nn.Module):
+ def __init__(self, config: DbrxConfig, block_idx: Optional[int] = None):
+ super().__init__()
+ self.block_idx = block_idx
+ self.resid_pdrop = config.resid_pdrop
+ self.norm_1 = nn.LayerNorm(config.d_model, bias=False)
+ self.attn = DBRX_ATTENTION_CLASSES[config._attn_implementation](
+ config=config,
+ block_idx=block_idx,
+ )
+ self.norm_2 = nn.LayerNorm(config.d_model, bias=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Any,
+ ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
+ residual_states = hidden_states
+ hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype)
+
+ hidden_states, attn_weights, past_key_value = self.attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
+ hidden_states = hidden_states + residual_states
+
+ residual_states = hidden_states
+ hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype)
+
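+        # Return the post-attention residual stream together with its norm_2 output so the caller
+        # can run the FFN on the normalized states and add the residual back afterwards.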
+ return residual_states, hidden_states, attn_weights, past_key_value
+
+
+class DbrxRouter(nn.Module):
+ def __init__(
+ self,
+ hidden_size: int,
+ moe_num_experts: int,
+ moe_top_k: int,
+ moe_jitter_eps: Optional[float],
+ moe_normalize_expert_weights: Optional[float],
+ ):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.moe_num_experts = moe_num_experts
+ self.moe_top_k = moe_top_k
+ self.moe_jitter_eps = moe_jitter_eps
+ self.moe_normalize_expert_weights = moe_normalize_expert_weights
+
+ self.layer = nn.Linear(self.hidden_size, self.moe_num_experts, bias=False)
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
+ if self.training and self.moe_jitter_eps is not None:
+ hidden_states *= torch.empty_like(hidden_states).uniform_(
+ 1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps
+ )
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+ weights = self.layer(hidden_states).softmax(dim=-1, dtype=torch.float32)
+ top_weights, top_experts = torch.topk(weights, self.moe_top_k, dim=-1)
+
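+        # Optionally rescale the selected top-k weights by their p-norm, with
+        # p = moe_normalize_expert_weights (e.g. p=1 renormalizes them to sum to 1).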
+ top_weights_scale = (
+ torch.norm(top_weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True)
+ if self.moe_normalize_expert_weights is not None
+ else 1.0
+ )
+ top_weights = top_weights / top_weights_scale
+
+ weights = weights.to(hidden_states.dtype)
+ top_weights = top_weights.to(hidden_states.dtype)
+ return weights, top_weights, top_experts
+
+
+class DbrxExpertGLU(nn.Module):
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.ffn_hidden_size = ffn_hidden_size
+ self.moe_num_experts = moe_num_experts
+
+ self.w1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
+ self.v1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
+ self.w2 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
+
+ act_fn_name = ffn_act_fn.get("name", "silu")
+ self.activation_fn = ACT2FN[act_fn_name]
+
+ def forward(
+ self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor
+ ) -> torch.Tensor:
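+        # Gated linear unit: activation(x @ w1.T) * (x @ v1.T), projected back to hidden_size with w2.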
+ gate_proj = x.matmul(expert_w1.t())
+ up_proj = x.matmul(expert_v1.t())
+ gate_proj = self.activation_fn(gate_proj)
+ intermediate_states = gate_proj * up_proj
+ down_proj = intermediate_states.matmul(expert_w2)
+ return down_proj
+
+
+class DbrxExperts(nn.Module):
+ def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
+ super().__init__()
+ self.moe_num_experts = moe_num_experts
+ self.mlp = DbrxExpertGLU(
+ hidden_size=hidden_size,
+ ffn_hidden_size=ffn_hidden_size,
+ moe_num_experts=moe_num_experts,
+ ffn_act_fn=ffn_act_fn,
+ )
+
+ def forward(
+ self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor
+ ) -> torch.Tensor:
+ bsz, q_len, hidden_size = x.shape
+ x = x.view(-1, hidden_size)
+ out = torch.zeros_like(x)
+
+ expert_mask = nn.functional.one_hot(top_experts, num_classes=self.moe_num_experts).permute(2, 1, 0)
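+        # expert_mask has shape (moe_num_experts, moe_top_k, num_tokens): expert_mask[e] marks which
+        # (top-k slot, token) pairs were routed to expert e.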
+ # Chunk experts at once to avoid storing full parameter multiple times in autograd
+ w1_chunked = self.mlp.w1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
+ self.moe_num_experts, dim=0
+ )
+ v1_chunked = self.mlp.v1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
+ self.moe_num_experts, dim=0
+ )
+ w2_chunked = self.mlp.w2.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(
+ self.moe_num_experts, dim=0
+ )
+ w1_chunked = [w1.squeeze(dim=0) for w1 in w1_chunked]
+ v1_chunked = [v1.squeeze(dim=0) for v1 in v1_chunked]
+ w2_chunked = [w2.squeeze(dim=0) for w2 in w2_chunked]
+ for expert_idx in range(0, self.moe_num_experts):
+ topk_idx, token_idx = torch.where(expert_mask[expert_idx])
+ if token_idx.shape[0] == 0:
+ continue
+
+ token_list = token_idx
+ topk_list = topk_idx
+
+ expert_tokens = x[None, token_list].reshape(-1, hidden_size)
+ expert_out = (
+ self.mlp(expert_tokens, w1_chunked[expert_idx], v1_chunked[expert_idx], w2_chunked[expert_idx])
+ * top_weights[token_list, topk_list, None]
+ )
+
+ out.index_add_(0, token_idx, expert_out)
+
+ out = out.reshape(bsz, q_len, hidden_size)
+ return out
+
+
+class DbrxFFN(nn.Module):
+ def __init__(self, config: DbrxConfig):
+ super().__init__()
+
+ ffn_config = config.ffn_config
+ self.router = DbrxRouter(
+ hidden_size=config.d_model,
+ moe_num_experts=ffn_config.moe_num_experts,
+ moe_top_k=ffn_config.moe_top_k,
+ moe_jitter_eps=ffn_config.moe_jitter_eps,
+ moe_normalize_expert_weights=ffn_config.moe_normalize_expert_weights,
+ )
+
+ self.experts = DbrxExperts(
+ hidden_size=config.d_model,
+ ffn_hidden_size=ffn_config.ffn_hidden_size,
+ moe_num_experts=ffn_config.moe_num_experts,
+ ffn_act_fn=ffn_config.ffn_act_fn,
+ )
+
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ weights, top_weights, top_experts = self.router(x)
+ out = self.experts(x, weights, top_weights, top_experts)
+ return out, weights
+
+
+class DbrxBlock(nn.Module):
+ def __init__(self, config: DbrxConfig, block_idx: int):
+ super().__init__()
+ self.hidden_size = config.d_model
+ self.resid_pdrop = config.resid_pdrop
+ self.block_idx = block_idx
+ self.norm_attn_norm = DbrxNormAttentionNorm(
+ config=config,
+ block_idx=block_idx,
+ )
+ self.ffn = DbrxFFN(config=config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Any,
+ ) -> Union[
+ Tuple[torch.Tensor],
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
+ Tuple[torch.Tensor, Optional[Cache]],
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]],
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]],
+ Tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]],
+ Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]],
+ ]:
+ """Forward function for DbrxBlock.
+
+ Args:
+ hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ position_ids (`torch.LongTensor`): position ids of shape `(batch, seq_len)`
+ attention_mask (`torch.Tensor`, optional): attention mask of size (batch_size, sequence_length)
+ if flash attention is used or (batch_size, 1, query_sequence_length, key_sequence_length)
+ if default attention is used.
+ past_key_value (`Tuple(torch.Tensor)`, optional): cached past key and value projection states
+ output_attentions (`bool`, optional): Whether or not to return the attentions tensors of all
+ attention layers. See `attentions` under returned tensors for more detail.
+ output_router_logits (`bool`, optional): Whether or not to return the router logits.
+ use_cache (`bool`, optional): If set to `True`, `past_key_values` key value states are
+ returned and can be used to speed up decoding (see `past_key_values`).
+ cache_position (`torch.LongTensor`, optional): position ids of the cache
+ """
+
+ # Norm + Attention + Norm
+ resid_states, hidden_states, self_attn_weights, present_key_value = self.norm_attn_norm(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ # Fully Connected
+ hidden_states, router_logits = self.ffn(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
+ hidden_states = resid_states + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ if output_router_logits:
+ outputs += (router_logits,)
+
+ return outputs
+
+
+DBRX_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DbrxConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
+ DBRX_START_DOCSTRING,
+)
+class DbrxPreTrainedModel(PreTrainedModel):
+ config_class = DbrxConfig
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["DbrxBlock"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module: nn.Module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, DbrxExpertGLU):
+ module.w1.data.normal_(mean=0.0, std=std)
+ module.v1.data.normal_(mean=0.0, std=std)
+ module.w2.data.normal_(mean=0.0, std=std)
+
+ def _setup_cache(self, cache_cls: Any, max_batch_size: int, max_cache_len: int):
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
+ raise ValueError(
+ "`static` cache implementation is not compatible with "
+ + "`attn_implementation==flash_attention_2`. Make sure to use "
+                + "`sdpa` in the meantime and open an issue at https://github.com/huggingface/transformers."
+ )
+
+ for block in self.transformer.blocks:
+ device = block.norm_attn_norm.norm_1.weight.device
+ if hasattr(self.config, "_pre_quantization_dtype"):
+ dtype = self.config._pre_quantization_dtype
+ else:
+ dtype = block.norm_attn_norm.attn.out_proj.weight.dtype
+ block.norm_attn_norm.attn.past_key_value = cache_cls(
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
+ )
+
+ def _reset_cache(self):
+ for block in self.transformer.blocks:
+ block.norm_attn_norm.attn.past_key_value = None
+
+
+DBRX_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
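+
+# A minimal usage sketch (not part of the original file) for the inputs documented above.
+# Loading "databricks/dbrx-instruct" requires very large amounts of memory; only the call pattern
+# matters here, and `DbrxModel` is defined further below in this module.
+def _example_dbrx_model_call():  # hypothetical helper, for illustration only
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
+    model = DbrxModel.from_pretrained("databricks/dbrx-instruct")
+    inputs = tokenizer("DBRX routes each token to a subset of experts.", return_tensors="pt")
+    # attention_mask is 1 for real tokens and 0 for padding, as described in the docstring above.
+    outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, use_cache=True)
+    return outputs.last_hidden_state.shape  # (batch_size, sequence_length, d_model)
+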
+
+@add_start_docstrings(
+ "The bare DBRX Model outputting raw hidden-states without any specific head on top.",
+ DBRX_START_DOCSTRING,
+)
+class DbrxModel(DbrxPreTrainedModel):
+    """Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DbrxBlock`].
+
+ Args:
+ config ([`DbrxConfig`]): Model configuration class with all parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ def __init__(self, config: DbrxConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.emb_pdrop = config.emb_pdrop
+
+ self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+ self.blocks = nn.ModuleList([DbrxBlock(config, block_idx) for block_idx in range(config.n_layers)])
+ self.norm_f = nn.LayerNorm(config.d_model, bias=False)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.wte
+
+ def set_input_embeddings(self, value: nn.Embedding):
+ self.wte = value
+
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+            raise ValueError(
+                "You must specify exactly one of input_ids or inputs_embeds"
+            )
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+
+ inputs_embeds = nn.functional.dropout(inputs_embeds, p=self.emb_pdrop, training=self.training)
+
+ past_seen_tokens = 0
+ if use_cache: # kept for BC (cache positions)
+ if not isinstance(past_key_values, StaticCache):
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_seen_tokens = past_key_values.get_seq_length()
+
+ if cache_position is None:
+ if isinstance(past_key_values, StaticCache):
+ raise ValueError("cache_position is a required argument when using StaticCache.")
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
+
+ # embed positions
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_logits = () if output_router_logits else None
+ next_decoder_cache = None
+
+ for block in self.blocks:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ block_outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ output_router_logits,
+ use_cache,
+ cache_position,
+ )
+ else:
+ block_outputs = block(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ hidden_states = block_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = block_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (block_outputs[1],)
+
+ if output_router_logits:
+ all_router_logits += (block_outputs[-1],)
+
+ hidden_states = self.norm_f(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = (
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
+ )
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
+ if v is not None
+ )
+ return MoeModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ router_logits=all_router_logits,
+ )
+
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+    # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes.
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
+ def _update_causal_mask(
+ self, attention_mask: Optional[torch.Tensor], input_tensor: torch.Tensor, cache_position: torch.Tensor
+ ) -> Optional[torch.Tensor]:
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ if hasattr(self.blocks[0].norm_attn_norm.attn, "past_key_value"): # static cache
+ target_length = self.config.max_position_embeddings
+ else: # dynamic cache
+ target_length = (
+ attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[-1] + 1
+ )
+ target_length = int(target_length)
+
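+        # Build an additive mask: 0.0 where attention is allowed and min_dtype (acting as -inf) where it
+        # is not; key positions beyond each query's cache position are masked out.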
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.dim() == 2:
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
+ elif attention_mask.dim() == 4:
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
+ offset = cache_position[0]
+ else:
+ offset = 0
+ mask_shape = attention_mask.shape
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
+ causal_mask[
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+ ] = mask_slice
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ ):
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
+ is_tracing = (
+ torch.jit.is_tracing()
+ or isinstance(input_tensor, torch.fx.Proxy)
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
+ )
+ if not is_tracing and torch.any(attention_mask != 1):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+
+@add_start_docstrings("The DBRX Model transformer for causal language modeling.", DBRX_START_DOCSTRING)
+class DbrxForCausalLM(DbrxPreTrainedModel):
+ def __init__(self, config: DbrxConfig):
+ super().__init__(config)
+ self.transformer = DbrxModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.moe_loss_weight = config.ffn_config.moe_loss_weight
+ self.num_experts = config.ffn_config.moe_num_experts
+ self.num_experts_per_tok = config.ffn_config.moe_top_k
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.transformer.get_input_embeddings()
+
+ def set_input_embeddings(self, value: nn.Embedding):
+ self.transformer.set_input_embeddings(value)
+
+ def get_output_embeddings(self) -> nn.Linear:
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings: nn.Linear):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder: DbrxModel):
+ self.transformer = decoder
+
+ def get_decoder(self) -> DbrxModel:
+ return self.transformer
+
+ @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
+ r"""Forward function for causal language modeling.
+
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >> from transformers import AutoTokenizer, DbrxForCausalLM
+
+ >> model = DbrxForCausalLM.from_pretrained("databricks/dbrx-instruct")
+ >> tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct")
+
+ >> prompt = "Hey, are you conscious? Can you talk to me?"
+ >> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >> # Generate
+ >> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = nn.CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ aux_loss = None
+ if output_router_logits:
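+            # Switch-Transformer-style load-balancing auxiliary loss computed from the per-layer router
+            # logits; it is scaled by `moe_loss_weight` and added to the LM loss below when labels are given.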
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits if return_dict else outputs[-1],
+ self.num_experts,
+ self.num_experts_per_tok,
+ attention_mask,
+ )
+ if labels is not None and loss is not None:
+                loss += self.moe_loss_weight * aux_loss.to(loss.device)  # make sure the aux loss is on the same device as the LM loss
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ if output_router_logits:
+ output = (aux_loss,) + output
+ return (loss,) + output if loss is not None else output
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids: torch.Tensor,
+ past_key_values: Optional[Cache] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ **kwargs: Any,
+ ) -> Dict[str, Any]:
+ past_length = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+            # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ if self.generation_config.cache_implementation == "static":
+ # generation with static cache
+ cache_position = kwargs.get("cache_position", None)
+ if cache_position is None:
+ past_length = 0
+ else:
+ past_length = cache_position[-1] + 1
+ input_ids = input_ids[:, past_length:]
+ position_ids = position_ids[:, past_length:] if position_ids is not None else None
+
+ # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
+ # same goes for position ids. Could also help with continued generation.
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
+ position_ids = position_ids.contiguous() if position_ids is not None else None
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
+ # TODO: use `next_tokens` directly instead.
+ model_inputs = {"input_ids": input_ids.contiguous()}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values: Cache, beam_idx: torch.LongTensor):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/esm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b07db5a5eea64b8e5d37cf2c9c89429586ea8fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/__init__.py
@@ -0,0 +1,94 @@
+# Copyright 2022 Facebook and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig"],
+ "tokenization_esm": ["EsmTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_esm"] = [
+ "ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "EsmForMaskedLM",
+ "EsmForSequenceClassification",
+ "EsmForTokenClassification",
+ "EsmModel",
+ "EsmPreTrainedModel",
+ ]
+ _import_structure["modeling_esmfold"] = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_esm"] = [
+ "TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFEsmForMaskedLM",
+ "TFEsmForSequenceClassification",
+ "TFEsmForTokenClassification",
+ "TFEsmModel",
+ "TFEsmPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig
+ from .tokenization_esm import EsmTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_esm import (
+ ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ EsmForMaskedLM,
+ EsmForSequenceClassification,
+ EsmForTokenClassification,
+ EsmModel,
+ EsmPreTrainedModel,
+ )
+ from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_esm import (
+ TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFEsmForMaskedLM,
+ TFEsmForSequenceClassification,
+ TFEsmForTokenClassification,
+ TFEsmModel,
+ TFEsmPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd73679d84200acb62cd3e03fa123113e608d2d1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51ed38dfcde2a393e83f3533f6943a19db786d0b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/configuration_esm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/convert_esm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/convert_esm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f99f3c2759577e51638b38f058e34a9c648c81f0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/convert_esm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8183a719183d45ad972aec4b13b305c9d5c6faa
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26653b87ed8377d3d36d3ee2cc0463afe809addc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_esmfold.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a63615a4c3ef6ac36e489e38f0070c430f598d82
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/modeling_tf_esm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79de33e47f627e3f74c0c07f94784c4a82f360bb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/__pycache__/tokenization_esm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py b/venv/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..31d309cb04a0175d6865d7f79f5f27241a264960
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/configuration_esm.py
@@ -0,0 +1,361 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ESM model configuration"""
+
+from dataclasses import asdict, dataclass
+from typing import Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+# TODO Update this
+
+from ..deprecated._archive_maps import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class EsmConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of an [`EsmModel`]. It is used to instantiate an ESM model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the ESM
+ [facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*):
+ Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`EsmModel`].
+ mask_token_id (`int`, *optional*):
+ The index of the mask token in the vocabulary. This must be included in the config because of the
+ "mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
+ pad_token_id (`int`, *optional*):
+ The index of the padding token in the vocabulary. This must be included in the config because certain parts
+ of the ESM code use this instead of the attention mask.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 1026):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`, `"rotary"`.
+ For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ is_decoder (`bool`, *optional*, defaults to `False`):
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ emb_layer_norm_before (`bool`, *optional*):
+ Whether to apply layer normalization after embeddings but before the main stem of the network.
+        token_dropout (`bool`, *optional*, defaults to `False`):
+ When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
+
+ Examples:
+
+ ```python
+ >>> from transformers import EsmModel, EsmConfig
+
+    >>> # Initializing an ESM facebook/esm-1b style configuration
+    >>> configuration = EsmConfig()
+
+    >>> # Initializing a model from the configuration
+    >>> model = EsmModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+ model_type = "esm"
+
+ def __init__(
+ self,
+ vocab_size=None,
+ mask_token_id=None,
+ pad_token_id=None,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=1026,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ position_embedding_type="absolute",
+ use_cache=True,
+ emb_layer_norm_before=None,
+ token_dropout=False,
+ is_folding_model=False,
+ esmfold_config=None,
+ vocab_list=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.emb_layer_norm_before = emb_layer_norm_before
+ self.token_dropout = token_dropout
+ self.is_folding_model = is_folding_model
+ if is_folding_model:
+ if esmfold_config is None:
+ logger.info("No esmfold_config supplied for folding model, using default values.")
+ esmfold_config = EsmFoldConfig()
+ elif isinstance(esmfold_config, dict):
+ esmfold_config = EsmFoldConfig(**esmfold_config)
+ self.esmfold_config = esmfold_config
+ if vocab_list is None:
+ logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
+ self.vocab_list = get_default_vocab_list()
+ else:
+ self.vocab_list = vocab_list
+ else:
+ self.esmfold_config = None
+ self.vocab_list = None
+ if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
+ raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
+
+ def to_dict(self):
+ """
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+ Returns:
+            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+ """
+ output = super().to_dict()
+ if isinstance(self.esmfold_config, EsmFoldConfig):
+ output["esmfold_config"] = self.esmfold_config.to_dict()
+ return output
+
+
+@dataclass
+class EsmFoldConfig:
+ esm_type: str = None
+ fp16_esm: bool = True
+ use_esm_attn_map: bool = False
+ esm_ablate_pairwise: bool = False
+ esm_ablate_sequence: bool = False
+ esm_input_dropout: float = 0
+
+ embed_aa: bool = True
+ bypass_lm: bool = False
+
+ lddt_head_hid_dim: int = 128
+ trunk: "TrunkConfig" = None
+
+ def __post_init__(self):
+ if self.trunk is None:
+ self.trunk = TrunkConfig()
+ elif isinstance(self.trunk, dict):
+ self.trunk = TrunkConfig(**self.trunk)
+
+ def to_dict(self):
+ """
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+ Returns:
+            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+ """
+ output = asdict(self)
+ output["trunk"] = self.trunk.to_dict()
+ return output
+
+
+@dataclass
+class TrunkConfig:
+ num_blocks: int = 48
+ sequence_state_dim: int = 1024
+ pairwise_state_dim: int = 128
+ sequence_head_width: int = 32
+ pairwise_head_width: int = 32
+ position_bins: int = 32
+ dropout: float = 0
+ layer_drop: float = 0
+ cpu_grad_checkpoint: bool = False
+ max_recycles: int = 4
+ chunk_size: Optional[int] = 128
+ structure_module: "StructureModuleConfig" = None
+
+ def __post_init__(self):
+ if self.structure_module is None:
+ self.structure_module = StructureModuleConfig()
+ elif isinstance(self.structure_module, dict):
+ self.structure_module = StructureModuleConfig(**self.structure_module)
+
+ if self.max_recycles <= 0:
+ raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
+        if self.sequence_state_dim % self.sequence_head_width != 0:
+            raise ValueError(
+                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
+                f" {self.sequence_state_dim} and {self.sequence_head_width}."
+            )
+        if self.pairwise_state_dim % self.pairwise_head_width != 0:
+            raise ValueError(
+                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
+                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
+            )
+
+ sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
+ pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
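+        # With the defaults above, 1024 // 32 = 32 sequence heads and 128 // 32 = 4 pairwise heads.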
+
+        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
+            raise ValueError(
+                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
+                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
+            )
+        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
+            raise ValueError(
+                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
+                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
+            )
+ if self.pairwise_state_dim % 2 != 0:
+ raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
+
+ if self.dropout >= 0.4:
+ raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
+
+ def to_dict(self):
+ """
+ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+ Returns:
+            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+ """
+ output = asdict(self)
+ output["structure_module"] = self.structure_module.to_dict()
+ return output
+
+
+@dataclass
+class StructureModuleConfig:
+ """
+ Args:
+ sequence_dim:
+ Single representation channel dimension
+ pairwise_dim:
+ Pair representation channel dimension
+ ipa_dim:
+ IPA hidden channel dimension
+ resnet_dim:
+ Angle resnet (Alg. 23 lines 11-14) hidden channel dimension
+ num_heads_ipa:
+ Number of IPA heads
+ num_qk_points:
+ Number of query/key points to generate during IPA
+ num_v_points:
+ Number of value points to generate during IPA
+ dropout_rate:
+ Dropout rate used throughout the layer
+ num_blocks:
+ Number of structure module blocks
+ num_transition_layers:
+ Number of layers in the single representation transition (Alg. 23 lines 8-9)
+ num_resnet_blocks:
+ Number of blocks in the angle resnet
+ num_angles:
+ Number of angles to generate in the angle resnet
+ trans_scale_factor:
+ Scale of single representation transition hidden dimension
+ epsilon:
+ Small number used in angle resnet normalization
+ inf:
+ Large number used for attention masking
+ """
+
+ sequence_dim: int = 384
+ pairwise_dim: int = 128
+ ipa_dim: int = 16
+ resnet_dim: int = 128
+ num_heads_ipa: int = 12
+ num_qk_points: int = 4
+ num_v_points: int = 8
+ dropout_rate: float = 0.1
+ num_blocks: int = 8
+ num_transition_layers: int = 1
+ num_resnet_blocks: int = 2
+ num_angles: int = 7
+ trans_scale_factor: int = 10
+ epsilon: float = 1e-8
+ inf: float = 1e5
+
+ def to_dict(self):
+ return asdict(self)
+
+
+def get_default_vocab_list():
+    return (
+        "<cls>",
+        "<pad>",
+        "<eos>",
+        "<unk>",
+        "L",
+        "A",
+        "G",
+        "V",
+        "S",
+        "E",
+        "R",
+        "T",
+        "I",
+        "D",
+        "P",
+        "K",
+        "Q",
+        "N",
+        "F",
+        "Y",
+        "M",
+        "H",
+        "W",
+        "C",
+        "X",
+        "B",
+        "U",
+        "Z",
+        "O",
+        ".",
+        "-",
+        "<null_1>",
+        "<mask>",
+ )
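+
+
+# A minimal sketch (not part of the original file): building a small EsmConfig and round-tripping it
+# through to_dict(). The sizes are illustrative toy values, not the defaults of any released ESM checkpoint.
+def _example_esm_config_roundtrip():  # hypothetical helper, for illustration only
+    config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
+    as_dict = config.to_dict()
+    # A folding-model config fills in a default EsmFoldConfig and the default vocabulary when they are omitted.
+    folding_config = EsmConfig(is_folding_model=True)
+    return as_dict["hidden_size"], len(folding_config.vocab_list)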
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py b/venv/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..22ca3f5392c19d6b1c36a69d0738b8528bfaaa9d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/convert_esm.py
@@ -0,0 +1,400 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ESM checkpoint."""
+
+
+import argparse
+import pathlib
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+import esm as esm_module
+import torch
+from esm.esmfold.v1.misc import batch_encode_sequences as esmfold_encode_sequences
+from esm.esmfold.v1.pretrained import esmfold_v1
+
+from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig
+from transformers.models.esm.modeling_esm import (
+ EsmForMaskedLM,
+ EsmForSequenceClassification,
+ EsmIntermediate,
+ EsmLayer,
+ EsmOutput,
+ EsmSelfAttention,
+ EsmSelfOutput,
+)
+from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
+from transformers.models.esm.tokenization_esm import EsmTokenizer
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+SAMPLE_DATA = [
+ (
+ "protein1",
+ "MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEPWQFSMLAAYMFLLIVLGFPINFLTLYVTVQHKKLRTPLNYILLNLAVADLFMVLGGFTSTLYTSLHGYFVFGPTGCNLEGFFATLGGEIALWSLVVLAIERYVVVCKPMSNFRFGENHAIMGVAFTWVMALACAAPPLAGWSRYIPEGLQCSCGIDYYTLKPEVNNESFVIYMFVVHFTIPMIIIFFCYGQLVFTVKEAAAQQQESATTQKAEKEVTRMVIIMVIAFLICWVPYASVAFYIFTHQGSNFGPIFMTIPAFFAKSAAIYNPVIYIMMNKQFRNCMLTTICCGKNPLGDDEASATVSKTETSQVAPA",
+ ),
+ ("protein2", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA"),
+ ("protein3", "MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLAGG"),
+ ("protein4", "MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLA"),
+]
+
+MODEL_MAPPING = {
+ "esm1b_t33_650M_UR50S": esm_module.pretrained.esm1b_t33_650M_UR50S,
+ "esm1v_t33_650M_UR90S_1": esm_module.pretrained.esm1v_t33_650M_UR90S_1,
+ "esm1v_t33_650M_UR90S_2": esm_module.pretrained.esm1v_t33_650M_UR90S_2,
+ "esm1v_t33_650M_UR90S_3": esm_module.pretrained.esm1v_t33_650M_UR90S_3,
+ "esm1v_t33_650M_UR90S_4": esm_module.pretrained.esm1v_t33_650M_UR90S_4,
+ "esm1v_t33_650M_UR90S_5": esm_module.pretrained.esm1v_t33_650M_UR90S_5,
+ "esm2_t48_15B_UR50D": esm_module.pretrained.esm2_t48_15B_UR50D,
+ "esm2_t36_3B_UR50D": esm_module.pretrained.esm2_t36_3B_UR50D,
+ "esm2_t33_650M_UR50D": esm_module.pretrained.esm2_t33_650M_UR50D,
+ "esm2_t30_150M_UR50D": esm_module.pretrained.esm2_t30_150M_UR50D,
+ "esm2_t12_35M_UR50D": esm_module.pretrained.esm2_t12_35M_UR50D,
+ "esm2_t6_8M_UR50D": esm_module.pretrained.esm2_t6_8M_UR50D,
+ "esmfold_v1": esmfold_v1,
+}
+
+restypes = list("ARNDCQEGHILKMFPSTWYV")
+
+restypes_with_x = restypes + ["X"]
+restypes_with_extras = restypes_with_x + ["<pad>", "<mask>", "<cls>", "<sep>", "<eos>"]
+
+
+def get_esmfold_tokenizer():
+ with TemporaryDirectory() as tempdir:
+ vocab = "\n".join(restypes_with_extras)
+ vocab_file = Path(tempdir) / "vocab.txt"
+ vocab_file.write_text(vocab)
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
+ hf_tokenizer.pad_token_id = 0 # Overlaps with 'A' but that seems to be what they want
+ return hf_tokenizer
+
+
+def transfer_and_check_weights(original_module, our_module):
+ status = our_module.load_state_dict(original_module.state_dict())
+ if status.missing_keys:
+ raise ValueError(f"Missing keys: {status.missing_keys}")
+ if status.unexpected_keys:
+ raise ValueError(f"Unexpected keys: {status.unexpected_keys}")
+
+
+def convert_esm_checkpoint_to_pytorch(
+ model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str
+):
+ """
+ Copy/paste/tweak esm's weights to our BERT structure.
+ """
+ if model.startswith("esmfold"):
+ esm = MODEL_MAPPING[model]()
+ else:
+ esm, alphabet = MODEL_MAPPING[model]()
+ esm.eval() # disable dropout
+
+ if model.startswith("esmfold"):
+ embed_dim = esm.esm.embed_dim
+ num_layers = esm.esm.num_layers
+ num_attention_heads = esm.esm.attention_heads
+ intermediate_size = 4 * embed_dim
+ token_dropout = esm.esm.token_dropout
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
+ position_embedding_type = "rotary"
+ is_folding_model = True
+ esmfold_config = EsmFoldConfig()
+ for key, val in esm.cfg.items():
+ if hasattr(esmfold_config, key) and key != "trunk":
+ setattr(esmfold_config, key, val)
+ for key, val in esm.cfg.trunk.items():
+ if hasattr(esmfold_config.trunk, key) and key != "structure_module":
+ setattr(esmfold_config.trunk, key, val)
+ for key, val in esm.cfg.trunk.structure_module.items():
+ if hasattr(esmfold_config.trunk.structure_module, key):
+ setattr(esmfold_config.trunk.structure_module, key, val)
+ elif hasattr(esm, "args"):
+ # Indicates an ESM-1b or ESM-1v model
+ embed_dim = esm.args.embed_dim
+ num_layers = esm.args.layers
+ num_attention_heads = esm.args.attention_heads
+ intermediate_size = esm.args.ffn_embed_dim
+ token_dropout = esm.args.token_dropout
+ emb_layer_norm_before = True if esm.emb_layer_norm_before else False
+ position_embedding_type = "absolute"
+ is_folding_model = False
+ esmfold_config = None
+ else:
+ # Indicates an ESM-2 model
+ embed_dim = esm.embed_dim
+ num_layers = esm.num_layers
+ num_attention_heads = esm.attention_heads
+ intermediate_size = 4 * embed_dim # This is hardcoded in ESM-2
+ token_dropout = esm.token_dropout
+ emb_layer_norm_before = False # This code path does not exist in ESM-2
+ position_embedding_type = "rotary"
+ is_folding_model = False
+ esmfold_config = None
+
+ if is_folding_model:
+ alphabet = esm.esm.alphabet
+ vocab_list = tuple(alphabet.all_toks)
+ mask_token_id = alphabet.mask_idx
+ pad_token_id = alphabet.padding_idx
+
+ if is_folding_model:
+ original_esm_model = esm.esm
+ else:
+ original_esm_model = esm
+
+ config = EsmConfig(
+ vocab_size=original_esm_model.embed_tokens.num_embeddings,
+ mask_token_id=mask_token_id,
+ hidden_size=embed_dim,
+ num_hidden_layers=num_layers,
+ num_attention_heads=num_attention_heads,
+ intermediate_size=intermediate_size,
+ max_position_embeddings=1026,
+ layer_norm_eps=1e-5, # PyTorch default used in fairseq
+ attention_probs_dropout_prob=0.0,
+ hidden_dropout_prob=0.0,
+ pad_token_id=pad_token_id,
+ emb_layer_norm_before=emb_layer_norm_before,
+ token_dropout=token_dropout,
+ position_embedding_type=position_embedding_type,
+ is_folding_model=is_folding_model,
+ esmfold_config=esmfold_config,
+ vocab_list=vocab_list,
+ )
+ if classification_head:
+ config.num_labels = esm.classification_heads["mnli"].out_proj.weight.shape[0]
+ print("Our ESM config:", config)
+
+ if model.startswith("esmfold"):
+ model_class = EsmForProteinFolding
+ elif classification_head:
+ model_class = EsmForSequenceClassification
+ else:
+ model_class = EsmForMaskedLM
+ model = model_class(config)
+ model.eval()
+
+ # Now let's copy all the weights.
+ # Embeddings
+ model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight
+ if position_embedding_type == "absolute":
+ model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight
+
+ if config.emb_layer_norm_before:
+ model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight
+ model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias
+
+ model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight
+ model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias
+
+ for i in range(config.num_hidden_layers):
+ # Encoder: start of layer
+ layer: EsmLayer = model.esm.encoder.layer[i]
+ # esm_layer: TransformerSentenceEncoderLayer = original_esm_model.layers[i]
+ esm_layer = original_esm_model.layers[i]
+
+ # self attention
+ self_attn: EsmSelfAttention = layer.attention.self
+ assert (
+ esm_layer.self_attn.k_proj.weight.data.shape
+ == esm_layer.self_attn.q_proj.weight.data.shape
+ == esm_layer.self_attn.v_proj.weight.data.shape
+ == torch.Size((config.hidden_size, config.hidden_size))
+ )
+
+ self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight
+ self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias
+ self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight
+ self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias
+ self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight
+ self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias
+
+ if getattr(esm_layer.self_attn, "rot_emb", None) is not None:
+ # Matt: Although inv_freq is not a trainable weight, it is computed at model init and cached.
+ # During the training of ESM-2 the model was converted to float16 precision, which also converts
+ # the inv_freq tensor, and the loss of precision remains even if the model is loaded later as float32.
+ # If we recompute inv_freq without this loss of precision then we will get subtly different rotary
+ # embeddings, which are enough to cause significant discrepancies in model outputs. To avoid this,
+ # we make sure the new model copies the data from the old inv_freq.
+ self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq
+
+ # LayerNorm changes for pre-activation
+ layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight
+ layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias
+ layer.LayerNorm.weight = esm_layer.final_layer_norm.weight
+ layer.LayerNorm.bias = esm_layer.final_layer_norm.bias
+
+ # self-attention output
+ self_output: EsmSelfOutput = layer.attention.output
+ assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape
+ self_output.dense.weight = esm_layer.self_attn.out_proj.weight
+ self_output.dense.bias = esm_layer.self_attn.out_proj.bias
+
+ # intermediate
+ intermediate: EsmIntermediate = layer.intermediate
+ assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape
+ intermediate.dense.weight = esm_layer.fc1.weight
+ intermediate.dense.bias = esm_layer.fc1.bias
+
+ # output
+ bert_output: EsmOutput = layer.output
+ assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape
+ bert_output.dense.weight = esm_layer.fc2.weight
+ bert_output.dense.bias = esm_layer.fc2.bias
+ # end of layer
+
+ if is_folding_model:
+ model.esm_s_combine.data = esm.esm_s_combine.data
+ model.af2_to_esm.data = esm.af2_to_esm.data
+ transfer_and_check_weights(esm.embedding, model.embedding)
+ transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp)
+ transfer_and_check_weights(esm.trunk, model.trunk)
+ transfer_and_check_weights(esm.distogram_head, model.distogram_head)
+ transfer_and_check_weights(esm.ptm_head, model.ptm_head)
+ transfer_and_check_weights(esm.lm_head, model.lm_head)
+ transfer_and_check_weights(esm.lddt_head, model.lddt_head)
+
+ elif classification_head:
+        model.classifier.dense.weight = esm.classification_heads["mnli"].dense.weight
+ model.classifier.dense.bias = esm.classification_heads["mnli"].dense.bias
+ model.classifier.out_proj.weight = esm.classification_heads["mnli"].out_proj.weight
+ model.classifier.out_proj.bias = esm.classification_heads["mnli"].out_proj.bias
+ else:
+ # LM Head
+ model.lm_head.dense.weight = esm.lm_head.dense.weight
+ model.lm_head.dense.bias = esm.lm_head.dense.bias
+ model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight
+ model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias
+ model.lm_head.decoder.weight = esm.lm_head.weight
+ model.lm_head.bias = esm.lm_head.bias
+
+ # Contact prediction head
+ transfer_and_check_weights(esm.contact_head, model.esm.contact_head)
+
+ # Prepare data (first 2 sequences from ESMStructuralSplitDataset superfamily / 4)
+ if is_folding_model:
+ # Folding models aren't trained on masked inputs and don't like mask tokens.
+ sample_data = SAMPLE_DATA[:2]
+ else:
+ sample_data = SAMPLE_DATA
+
+ if is_folding_model:
+ hf_tokenizer = get_esmfold_tokenizer()
+ hf_tokens = hf_tokenizer(
+ [row[1] for row in sample_data], return_tensors="pt", padding=True, add_special_tokens=False
+ )
+ esmfold_aas, esmfold_mask, _, _, _ = esmfold_encode_sequences([row[1] for row in sample_data])
+ success = torch.all(hf_tokens["input_ids"] == esmfold_aas) and torch.all(
+ hf_tokens["attention_mask"] == esmfold_mask
+ )
+ else:
+ # Let's check that we get the same results.
+ batch_converter = alphabet.get_batch_converter()
+ batch_labels, batch_strs, batch_tokens = batch_converter(sample_data)
+ # Prepare tokenizer and make sure it matches
+ with TemporaryDirectory() as tempdir:
+ vocab = "\n".join(alphabet.all_toks)
+ vocab_file = Path(tempdir) / "vocab.txt"
+ vocab_file.write_text(vocab)
+ hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file))
+
+ hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors="pt", padding=True)
+ success = torch.all(hf_tokens["input_ids"] == batch_tokens)
+
+ print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩")
+ if not success:
+ raise Exception("Tokenization does not match!")
+
+ with torch.no_grad():
+ if is_folding_model:
+ # Let's test the model in parts
+ # ESMFold always converts the ESM stem to float16, which requires float16 ops
+ # that don't exist on CPU. Therefore, to test it we need to run it on GPU. However,
+ # ESMFold is what we in the community call a "big boy" and so we desperately avoid putting both the
+ # original and the converted model on the GPU at the same time.
+ their_output = esm.cuda().infer([row[1] for row in sample_data])
+ our_output = model.cuda()(
+ input_ids=hf_tokens["input_ids"].cuda(), attention_mask=hf_tokens["attention_mask"].cuda()
+ )
+ else:
+ our_output = model(**hf_tokens, output_hidden_states=True)
+ our_output = our_output["logits"]
+ if classification_head:
+ their_output = esm.model.classification_heads["mnli"](esm.extract_features(batch_tokens))
+ else:
+ their_output = esm(hf_tokens["input_ids"], repr_layers=list(range(999)))
+ their_output = their_output["logits"]
+
+ if is_folding_model:
+ max_absolute_diff = torch.max(torch.abs(our_output["positions"] - their_output["positions"])).item()
+ success = torch.allclose(our_output["positions"], their_output["positions"], atol=1e-5)
+ else:
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
+ success = torch.allclose(our_output, their_output, atol=1e-5)
+
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
+
+ if not success:
+ raise Exception("Something went wRoNg")
+
+ if not is_folding_model:
+ # Let's check contact prediction too
+ our_output = model.predict_contacts(hf_tokens["input_ids"], hf_tokens["attention_mask"])
+ their_output = esm.predict_contacts(hf_tokens["input_ids"])
+ max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
+ success = torch.allclose(our_output, their_output, atol=1e-5)
+
+ print("Contact prediction testing:")
+ print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
+ print("Do both models output the same tensors?", "🔥" if success else "💩")
+
+ if not success:
+ raise Exception("Something went wRoNg")
+
+ pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ del esm # Free up some memory before continuing
+
+ print(f"Saving tokenizer to {pytorch_dump_folder_path}")
+ hf_tokenizer.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_repo:
+        model.push_to_hub(repo_id=push_to_repo, token=auth_token)
+        hf_tokenizer.push_to_hub(repo_id=push_to_repo, token=auth_token)
+
+
+if __name__ == "__main__":
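+    # Example invocation (illustrative; the script filename, output path, repo name and token are placeholders):
+    #   python convert_esm.py --model esm2_t6_8M_UR50D --pytorch_dump_folder_path ./esm2_t6_8M \
+    #       --push_to_repo your-username/esm2_t6_8M_UR50D --auth_token hf_xxx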
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--classification_head", action="store_true", help="Whether to convert a final classification head."
+ )
+ parser.add_argument("--model", default=None, type=str, required=True, help="Name of model to convert.")
+ parser.add_argument("--push_to_repo", type=str, help="Repo to upload to (including username!).")
+ parser.add_argument("--auth_token", type=str, help="HuggingFace auth token.")
+ args = parser.parse_args()
+ convert_esm_checkpoint_to_pytorch(
+ args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..a97ea58d7b81d9969cdac3a6d805b5fe34b9ac3f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esm.py
@@ -0,0 +1,1265 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ESM model."""
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ MaskedLMOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import logging
+from .configuration_esm import EsmConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
+_CONFIG_FOR_DOC = "EsmConfig"
+
+
+from ..deprecated._archive_maps import ESM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def rotate_half(x):
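+    # Split the last dimension in half and map each pair (x1, x2) to (-x2, x1), a 90-degree rotation used by RoPE.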
+ x1, x2 = x.chunk(2, dim=-1)
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(x, cos, sin):
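+    # Trim the cached cos/sin tables to the current sequence length, then rotate each position:
+    # x * cos + rotate_half(x) * sin.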
+ cos = cos[:, :, : x.shape[-2], :]
+ sin = sin[:, :, : x.shape[-2], :]
+
+ return (x * cos) + (rotate_half(x) * sin)
+
+
+def gelu(x):
+ """
+ This is the gelu implementation from the original ESM repo. Using F.gelu yields subtly wrong results.
+ """
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
+
+
+def symmetrize(x):
+ "Make layer symmetric in final two dimensions, used for contact prediction."
+ return x + x.transpose(-1, -2)
+
+
+def average_product_correct(x):
+ "Perform average product correct, used for contact prediction."
+ a1 = x.sum(-1, keepdims=True)
+ a2 = x.sum(-2, keepdims=True)
+ a12 = x.sum((-1, -2), keepdims=True)
+
+ avg = a1 * a2
+ avg.div_(a12) # in-place to reduce memory
+ normalized = x - avg
+ return normalized
+
+
+class RotaryEmbedding(torch.nn.Module):
+ """
+ Rotary position embeddings based on those in
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
+ matrices which depend on their relative positions.
+ """
+
+ def __init__(self, dim: int):
+ super().__init__()
+ # Generate and save the inverse frequency buffer (non trainable)
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
+ self.register_buffer("inv_freq", inv_freq)
+
+ self._seq_len_cached = None
+ self._cos_cached = None
+ self._sin_cached = None
+
+ def _update_cos_sin_tables(self, x, seq_dimension=2):
+ seq_len = x.shape[seq_dimension]
+
+ # Reset the tables if the sequence length has changed,
+ # or if we're on a new device (possibly due to tracing for instance)
+ if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
+ self._seq_len_cached = seq_len
+ t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq)
+ freqs = torch.outer(t, self.inv_freq)
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+
+ self._cos_cached = emb.cos()[None, None, :, :]
+ self._sin_cached = emb.sin()[None, None, :, :]
+
+ return self._cos_cached, self._sin_cached
+
+ def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)
+
+ return (
+ apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached),
+ apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached),
+ )
+
+
+class EsmContactPredictionHead(nn.Module):
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
+
+ def __init__(
+ self,
+ in_features: int,
+ bias=True,
+ eos_idx: int = 2,
+ ):
+ super().__init__()
+ self.in_features = in_features
+ self.eos_idx = eos_idx
+ self.regression = nn.Linear(in_features, 1, bias)
+ self.activation = nn.Sigmoid()
+
+ def forward(self, tokens, attentions):
+ # remove eos token attentions
+ eos_mask = tokens.ne(self.eos_idx).to(attentions)
+ eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
+ attentions = attentions * eos_mask[:, None, None, :, :]
+ attentions = attentions[..., :-1, :-1]
+ # remove cls token attentions
+ attentions = attentions[..., 1:, 1:]
+ batch_size, layers, heads, seqlen, _ = attentions.size()
+ attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
+
+ # features: batch x channels x tokens x tokens (symmetric)
+ attentions = attentions.to(
+ self.regression.weight.device
+ ) # attentions always float32, may need to convert to float16
+ attentions = average_product_correct(symmetrize(attentions))
+ attentions = attentions.permute(0, 2, 3, 1)
+ return self.activation(self.regression(attentions).squeeze(3))
+
+
+class EsmEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+
+ if config.emb_layer_norm_before:
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ else:
+ self.layer_norm = None
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+ self.token_dropout = config.token_dropout
+ self.mask_token_id = config.mask_token_id
+
+ def forward(
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
+ # embedding_scale factor here.
+ embeddings = inputs_embeds
+
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
+        # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
+ if self.token_dropout:
+ embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0)
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
+ src_lengths = attention_mask.sum(-1)
+ mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths
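+            # Illustrative example: with mask_ratio_train = 0.12, a sequence in which 25% of tokens are masked
+            # has its embeddings scaled by (1 - 0.12) / (1 - 0.25) ~= 1.17 in the line below.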
+ embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(
+ embeddings.dtype
+ )
+
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings = embeddings + position_embeddings
+
+ if self.layer_norm is not None:
+ embeddings = self.layer_norm(embeddings)
+ if attention_mask is not None:
+ embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype)
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
+ # embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
+class EsmSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ self.rotary_embeddings = None
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+ elif self.position_embedding_type == "rotary":
+ self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
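+        # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
+        # so attention is computed per head.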
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
+ # ESM code and fix rotary embeddings.
+ query_layer = query_layer * self.attention_head_size**-0.5
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ if self.position_embedding_type == "rotary":
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
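+        # Note: no further division by sqrt(head_dim) here, since the query was already scaled above.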
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ seq_length = hidden_states.size()[1]
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in EsmModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs.to(value_layer.dtype), value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+class EsmSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class EsmAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = EsmSelfAttention(config)
+ self.output = EsmSelfOutput(config)
+ self.pruned_heads = set()
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
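+        # Pre-LayerNorm attention: normalize the input first; self.output then adds the residual
+        # back to the un-normalized hidden_states.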
+ hidden_states_ln = self.LayerNorm(hidden_states)
+ self_outputs = self.self(
+ hidden_states_ln,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class EsmIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = gelu(hidden_states)
+ return hidden_states
+
+
+class EsmOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class EsmLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = EsmAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = EsmAttention(config)
+ self.intermediate = EsmIntermediate(config)
+ self.output = EsmOutput(config)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise AttributeError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = self.feed_forward_chunk(attention_output)
+
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
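+        # Pre-LayerNorm feed-forward: normalize, project through the MLP, then add the residual
+        # to the un-normalized input.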
+ attention_output_ln = self.LayerNorm(attention_output)
+ intermediate_output = self.intermediate(attention_output_ln)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class EsmEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)])
+ self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
+ "`use_cache=False`..."
+ )
+ use_cache = False
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache = next_decoder_cache + (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if self.emb_layer_norm_after:
+ hidden_states = self.emb_layer_norm_after(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class EsmPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class EsmPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = EsmConfig
+ base_model_prefix = "esm"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["EsmLayer", "EsmFoldTriangularSelfAttentionBlock", "EsmEmbeddings"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+ESM_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ESM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
+ ESM_START_DOCSTRING,
+)
+class EsmModel(EsmPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
+    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = EsmEmbeddings(config)
+ self.encoder = EsmEncoder(config)
+
+ self.pooler = EsmPooler(config) if add_pooling_layer else None
+
+ self.contact_head = EsmContactPredictionHead(
+ in_features=config.num_hidden_layers * config.num_attention_heads, bias=True
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+ def predict_contacts(self, tokens, attention_mask):
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
+ attns = torch.stack(attns, dim=1) # Matches the original model layout
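+        # attns now has shape (batch_size, num_layers, num_heads, seq_len, seq_len).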
+ # In the original model, attentions for padding tokens are completely zeroed out.
+ # This makes no difference most of the time because the other tokens won't attend to them,
+ # but it does for the contact prediction task, which takes attentions as input,
+ # so we have to mimic that here.
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3)
+ attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4)
+ return self.contact_head(tokens, attns)
+
+
+@add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
+class EsmForMaskedLM(EsmPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.esm = EsmModel(config, add_pooling_layer=False)
+ self.lm_head = EsmLMHead(config)
+
+ self.init_weights()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
+ Used to hide legacy arguments that have been deprecated.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ labels = labels.to(prediction_scores.device)
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def predict_contacts(self, tokens, attention_mask):
+ return self.esm.predict_contacts(tokens, attention_mask=attention_mask)
+
+
+class EsmLMHead(nn.Module):
+ """ESM Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x) + self.bias
+ return x
+
+
+@add_start_docstrings(
+ """
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ESM_START_DOCSTRING,
+)
+class EsmForSequenceClassification(EsmPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.esm = EsmModel(config, add_pooling_layer=False)
+ self.classifier = EsmClassificationHead(config)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ESM_START_DOCSTRING,
+)
+class EsmForTokenClassification(EsmPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.esm = EsmModel(config, add_pooling_layer=False)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ labels = labels.to(logits.device)
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class EsmClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, features, **kwargs):
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
+ x = self.dropout(x)
+ x = self.dense(x)
+ x = torch.tanh(x)
+ x = self.dropout(x)
+ x = self.out_proj(x)
+ return x
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids (`torch.Tensor`): The token ids for which to build position ids.
+ padding_idx (`int`): The index of the padding token.
+ past_key_values_length (`int`, *optional*, defaults to 0): Offset added to the computed position numbers.
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
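+
+
+# Illustrative trace (editor's sketch, not part of the upstream file): with padding_idx=1 and
+# input_ids = [[1, 5, 6, 1]], mask = [[0, 1, 1, 0]], cumsum(mask) * mask = [[0, 1, 2, 0]], and the
+# returned position ids are [[1, 2, 3, 1]] -- padding positions keep padding_idx while real tokens
+# are numbered from padding_idx + 1.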
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aaf811960721b55d5e10a28a4e3be5aaeed1ec7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_esmfold.py
@@ -0,0 +1,2322 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+import sys
+from dataclasses import dataclass
+from functools import partial
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.nn import LayerNorm
+
+from ...integrations.deepspeed import is_deepspeed_available
+from ...modeling_outputs import ModelOutput
+from ...utils import (
+ ContextManagers,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_scipy_available,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_esm import EsmConfig
+from .modeling_esm import ESM_START_DOCSTRING, EsmModel, EsmPreTrainedModel
+from .openfold_utils import (
+ OFProtein,
+ Rigid,
+ Rotation,
+ atom14_to_atom37,
+ chunk_layer,
+ compute_predicted_aligned_error,
+ compute_tm,
+ frames_and_literature_positions_to_atom14_pos,
+ make_atom14_masks,
+ residue_constants,
+ to_pdb,
+ torsion_angles_to_frames,
+)
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "facebook/esmfold_v1"
+_CONFIG_FOR_DOC = "EsmConfig"
+
+
+@dataclass
+class EsmForProteinFoldingOutput(ModelOutput):
+ """
+ Output type of [`EsmForProteinFolding`].
+
+ Args:
+ frames (`torch.FloatTensor`):
+ Output frames.
+ sidechain_frames (`torch.FloatTensor`):
+ Output sidechain frames.
+ unnormalized_angles (`torch.FloatTensor`):
+ Predicted unnormalized backbone and side chain torsion angles.
+ angles (`torch.FloatTensor`):
+ Predicted backbone and side chain torsion angles.
+ positions (`torch.FloatTensor`):
+ Predicted positions of the backbone and side chain atoms.
+ states (`torch.FloatTensor`):
+ Hidden states from the protein folding trunk.
+ s_s (`torch.FloatTensor`):
+ Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.
+ s_z (`torch.FloatTensor`):
+ Pairwise residue embeddings.
+ distogram_logits (`torch.FloatTensor`):
+ Input logits to the distogram used to compute residue distances.
+ lm_logits (`torch.FloatTensor`):
+ Logits output by the ESM-2 protein language model stem.
+ aatype (`torch.FloatTensor`):
+ Input amino acids (AlphaFold2 indices).
+ atom14_atom_exists (`torch.FloatTensor`):
+ Whether each atom exists in the atom14 representation.
+ residx_atom14_to_atom37 (`torch.FloatTensor`):
+ Mapping between atoms in the atom14 and atom37 representations.
+ residx_atom37_to_atom14 (`torch.FloatTensor`):
+ Mapping between atoms in the atom37 and atom14 representations.
+ atom37_atom_exists (`torch.FloatTensor`):
+ Whether each atom exists in the atom37 representation.
+ residue_index (`torch.FloatTensor`):
+ The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be
+ a sequence of integers from 0 to `sequence_length`.
+ lddt_head (`torch.FloatTensor`):
+ Raw outputs from the lddt head used to compute plddt.
+ plddt (`torch.FloatTensor`):
+ Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is
+ uncertain, or where the protein structure is disordered.
+ ptm_logits (`torch.FloatTensor`):
+ Raw logits used for computing ptm.
+ ptm (`torch.FloatTensor`):
+ TM-score output representing the model's high-level confidence in the overall structure.
+ aligned_confidence_probs (`torch.FloatTensor`):
+ Per-residue confidence scores for the aligned structure.
+ predicted_aligned_error (`torch.FloatTensor`):
+ Predicted error between the model's prediction and the ground truth.
+ max_predicted_aligned_error (`torch.FloatTensor`):
+ Per-sample maximum predicted error.
+ """
+
+ frames: torch.FloatTensor = None
+ sidechain_frames: torch.FloatTensor = None
+ unnormalized_angles: torch.FloatTensor = None
+ angles: torch.FloatTensor = None
+ positions: torch.FloatTensor = None
+ states: torch.FloatTensor = None
+ s_s: torch.FloatTensor = None
+ s_z: torch.FloatTensor = None
+ distogram_logits: torch.FloatTensor = None
+ lm_logits: torch.FloatTensor = None
+ aatype: torch.FloatTensor = None
+ atom14_atom_exists: torch.FloatTensor = None
+ residx_atom14_to_atom37: torch.FloatTensor = None
+ residx_atom37_to_atom14: torch.FloatTensor = None
+ atom37_atom_exists: torch.FloatTensor = None
+ residue_index: torch.FloatTensor = None
+ lddt_head: torch.FloatTensor = None
+ plddt: torch.FloatTensor = None
+ ptm_logits: torch.FloatTensor = None
+ ptm: torch.FloatTensor = None
+ aligned_confidence_probs: torch.FloatTensor = None
+ predicted_aligned_error: torch.FloatTensor = None
+ max_predicted_aligned_error: torch.FloatTensor = None
+
+
+ESMFOLD_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ masking_pattern (`torch.LongTensor` of shape `({0})`, *optional*):
+ Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.
+ num_recycles (`int`, *optional*, defaults to `None`):
+ Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling"
+ consists of passing the output of the folding trunk back in as input to the trunk. During training, the
+ number of recycles should vary with each batch, to ensure that the model learns to output valid predictions
+ after each recycle. During inference, num_recycles should be set to the highest value that the model was
+ trained with for maximum accuracy. Accordingly, when this value is set to `None`, config.max_recycles is
+ used.
+"""
+
+
+def is_fp16_enabled():
+ # Autocast world
+ fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16
+ fp16_enabled = fp16_enabled and torch.is_autocast_enabled()
+
+ return fp16_enabled
+
+
+def is_deepspeed_initialized():
+ if is_deepspeed_available():
+ return False
+ else:
+ try:
+ import deepspeed
+
+ # This is not available in all DeepSpeed versions.
+ return deepspeed.utils.is_initialized()
+ except Exception:
+ return False
+
+
+def collate_dense_tensors(samples: List[torch.Tensor], pad_v: float = 0) -> torch.Tensor:
+ """
+ Takes a list of tensors with the following dimensions:
+ [(d_11, ..., d_1K),
+ (d_21, ..., d_2K), ..., (d_N1, ..., d_NK)]
+ and stacks and pads them into a single tensor of shape:
+ (N, max_i d_i1, ..., max_i d_iK)
+ """
+ if len(samples) == 0:
+ return torch.Tensor()
+ if len({x.dim() for x in samples}) != 1:
+ raise RuntimeError(f"Samples has varying dimensions: {[x.dim() for x in samples]}")
+ (device,) = tuple({x.device for x in samples}) # assumes all on same device
+ max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])]
+ result = torch.empty(len(samples), *max_shape, dtype=samples[0].dtype, device=device)
+ result.fill_(pad_v)
+ for i in range(len(samples)):
+ result_i = result[i]
+ t = samples[i]
+ result_i[tuple(slice(0, k) for k in t.shape)] = t
+ return result
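+
+# Editor's illustration (not upstream code): given samples with shapes (2, 3) and (1, 4) and the default
+# pad_v=0, collate_dense_tensors returns a (2, 2, 4) tensor in which each input is copied into the
+# top-left corner of its slice and the remaining entries are filled with 0.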
+
+
+def flatten_final_dims(t: torch.Tensor, no_dims: int):
+ return t.reshape(t.shape[:-no_dims] + (-1,))
+
+
+def permute_final_dims(tensor: torch.Tensor, inds: List[int]):
+ zero_index = -1 * len(inds)
+ first_inds = list(range(len(tensor.shape[:zero_index])))
+ return tensor.permute(first_inds + [zero_index + i for i in inds])
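+
+# Shape sketch (editor's note): for t of shape (B, I, J, C), permute_final_dims(t, (2, 0, 1)) gives a view
+# of shape (B, C, I, J), and flatten_final_dims(t, 2) reshapes it to (B, I, J * C); leading batch
+# dimensions are never touched.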
+
+
+def dict_multimap(fn, dicts):
+ first = dicts[0]
+ new_dict = {}
+ for k, v in first.items():
+ all_v = [d[k] for d in dicts]
+ if isinstance(v, dict):
+ new_dict[k] = dict_multimap(fn, all_v)
+ else:
+ new_dict[k] = fn(all_v)
+
+ return new_dict
+
+
+def trunc_normal_init_(weights, scale=1.0, fan="fan_in"):
+ shape = weights.shape
+ scale = scale / max(1, shape[1])
+
+ if not is_scipy_available():
+ logger.warning(
+ "This init requires scipy, but scipy was not found, default to an approximation that might not be"
+ " equivalent."
+ )
+ std = math.sqrt(scale)
+ torch.nn.init.normal_(weights, std=std).clamp(min=0.0, max=2.0 * std)
+
+ else:
+ from scipy.stats import truncnorm
+
+ std = math.sqrt(scale) / truncnorm.std(a=-2, b=2, loc=0, scale=1)
+ samples = truncnorm.rvs(a=-2, b=2, loc=0, scale=std, size=weights.numel())
+ samples = np.reshape(samples, shape)
+ weights.copy_(torch.tensor(samples, device=weights.device))
+
+
+def ipa_point_weights_init_(weights):
+ with torch.no_grad():
+ softplus_inverse_1 = 0.541324854612918
+ weights.fill_(softplus_inverse_1)
+
+
+class EsmFoldLinear(nn.Linear):
+ """
+ A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
+
+ Implements the initializers in 1.11.4, plus some additional ones found in the code.
+ """
+
+ def __init__(
+ self,
+ in_dim: int,
+ out_dim: int,
+ bias: bool = True,
+ init: str = "default",
+ init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,
+ ):
+ """
+ Args:
+ in_dim:
+ The final dimension of inputs to the layer
+ out_dim:
+ The final dimension of layer outputs
+ bias:
+ Whether to learn an additive bias. True by default
+ init:
+ The initializer to use. Choose from:
+
+ "default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
+ distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
+ Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
+
+ Overridden by init_fn if the latter is not None.
+ init_fn:
+ A custom initializer taking weight and bias as inputs. Overrides init if not None.
+ """
+ super().__init__(in_dim, out_dim, bias=bias)
+
+ if bias:
+ with torch.no_grad():
+ self.bias.fill_(0)
+ self.init = init
+ self.init_fn = init_fn
+
+ if init not in ["default", "relu", "glorot", "gating", "normal", "final"]:
+ raise ValueError("Invalid init string.")
+
+
+class EsmFoldLayerNorm(nn.Module):
+ def __init__(self, c_in, eps=1e-5):
+ super().__init__()
+
+ self.c_in = (c_in,)
+ self.eps = eps
+
+ self.weight = nn.Parameter(torch.ones(c_in))
+ self.bias = nn.Parameter(torch.zeros(c_in))
+
+ def forward(self, x):
+ d = x.dtype
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
+ with torch.cuda.amp.autocast(enabled=False):
+ out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps)
+ else:
+ out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps)
+
+ return out
+
+
+@torch.jit.ignore
+def softmax_no_cast(t: torch.Tensor, dim: int = -1) -> torch.Tensor:
+ """
+ Softmax, but without automatic casting to fp32 when the input is of type bfloat16
+ """
+ d = t.dtype
+ if d is torch.bfloat16 and not is_deepspeed_initialized():
+ with torch.cuda.amp.autocast(enabled=False):
+ s = torch.nn.functional.softmax(t, dim=dim)
+ else:
+ s = torch.nn.functional.softmax(t, dim=dim)
+
+ return s
+
+
+class EsmFoldAttention(nn.Module):
+ """
+ Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
+ """
+
+ def __init__(
+ self,
+ c_q: int,
+ c_k: int,
+ c_v: int,
+ c_hidden: int,
+ no_heads: int,
+ gating: bool = True,
+ ):
+ """
+ Args:
+ c_q:
+ Input dimension of query data
+ c_k:
+ Input dimension of key data
+ c_v:
+ Input dimension of value data
+ c_hidden:
+ Per-head hidden dimension
+ no_heads:
+ Number of attention heads
+ gating:
+ Whether the output should be gated using query data
+ """
+ super().__init__()
+
+ self.c_q = c_q
+ self.c_k = c_k
+ self.c_v = c_v
+ self.c_hidden = c_hidden
+ self.no_heads = no_heads
+ self.gating = gating
+
+ # DISCREPANCY: c_hidden is not the per-head channel dimension, as
+ # stated in the supplement, but the overall channel dimension.
+
+ self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot")
+ self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot")
+ self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot")
+ self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init="final")
+
+ self.linear_g = None
+ if self.gating:
+ self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init="gating")
+
+ self.sigmoid = nn.Sigmoid()
+
+ def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ # [*, Q/K/V, H * C_hidden]
+ q = self.linear_q(q_x)
+ k = self.linear_k(kv_x)
+ v = self.linear_v(kv_x)
+
+ # [*, Q/K, H, C_hidden]
+ q = q.view(q.shape[:-1] + (self.no_heads, -1))
+ k = k.view(k.shape[:-1] + (self.no_heads, -1))
+ v = v.view(v.shape[:-1] + (self.no_heads, -1))
+
+ # [*, H, Q/K, C_hidden]
+ q = q.transpose(-2, -3)
+ k = k.transpose(-2, -3)
+ v = v.transpose(-2, -3)
+
+ q /= math.sqrt(self.c_hidden)
+
+ return q, k, v
+
+ def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
+ if self.linear_g is not None:
+ g = self.sigmoid(self.linear_g(q_x))
+
+ # [*, Q, H, C_hidden]
+ g = g.view(g.shape[:-1] + (self.no_heads, -1))
+ o = o * g
+
+ # [*, Q, H * C_hidden]
+ o = flatten_final_dims(o, 2)
+
+ # [*, Q, C_q]
+ o = self.linear_o(o)
+
+ return o
+
+ def forward(
+ self,
+ q_x: torch.Tensor,
+ kv_x: torch.Tensor,
+ biases: Optional[List[torch.Tensor]] = None,
+ use_memory_efficient_kernel: bool = False,
+ use_lma: bool = False,
+ lma_q_chunk_size: int = 1024,
+ lma_kv_chunk_size: int = 4096,
+ use_flash: bool = False,
+ flash_mask: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ """
+ Args:
+ q_x:
+ [*, Q, C_q] query data
+ kv_x:
+ [*, K, C_k] key data
+ biases:
+ List of biases that broadcast to [*, H, Q, K]
+ use_memory_efficient_kernel:
+ Whether to use a custom memory-efficient attention kernel. This should be the default choice for most.
+ If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
+ use_lma:
+ Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
+ stock PyTorch implementation is used instead
+ lma_q_chunk_size:
+ Query chunk size (for LMA)
+ lma_kv_chunk_size:
+ Key/Value chunk size (for LMA)
+ Returns:
+ [*, Q, C_q] attention update
+ """
+ if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None):
+ raise ValueError("If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided")
+
+ if use_flash and biases is not None:
+ raise ValueError("use_flash is incompatible with the bias option. For masking, use flash_mask instead")
+
+ attn_options = [use_memory_efficient_kernel, use_lma, use_flash]
+ if sum(attn_options) > 1:
+ raise ValueError("Choose at most one alternative attention algorithm")
+
+ if biases is None:
+ biases = []
+
+ # [*, H, Q/K, C_hidden]
+ query, key, value = self._prep_qkv(q_x, kv_x)
+ key = permute_final_dims(key, (1, 0))
+
+ # [*, H, Q, K]
+ output = torch.matmul(query, key)
+ for b in biases:
+ output += b
+ output = softmax_no_cast(output, -1)
+
+ # [*, H, Q, C_hidden]
+ output = torch.matmul(output, value)
+ output = output.transpose(-2, -3)
+ output = self._wrap_up(output, q_x)
+
+ return output
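+
+# Usage sketch (editor's illustration; the sizes are arbitrary): with c_q=c_k=c_v=64, c_hidden=16 and
+# no_heads=4, calling this module on q_x of shape (B, Q, 64) and kv_x of shape (B, K, 64) with biases
+# broadcastable to (B, 4, Q, K) returns an update of shape (B, Q, 64). Note that in this port the
+# use_memory_efficient_kernel/use_lma/use_flash flags are only validated; the standard PyTorch path
+# above is always executed.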
+
+
+class EsmFoldTriangleAttention(nn.Module):
+ def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1e9):
+ """
+ Args:
+ c_in:
+ Input channel dimension
+ c_hidden:
+ Overall hidden channel dimension (not per-head)
+ no_heads:
+ Number of attention heads
+ """
+ super().__init__()
+
+ self.c_in = c_in
+ self.c_hidden = c_hidden
+ self.no_heads = no_heads
+ self.starting = starting
+ self.inf = inf
+
+ self.layer_norm = LayerNorm(self.c_in)
+
+ self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init="normal")
+
+ self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads)
+
+ @torch.jit.ignore
+ def _chunk(
+ self,
+ x: torch.Tensor,
+ biases: List[torch.Tensor],
+ chunk_size: int,
+ use_memory_efficient_kernel: bool = False,
+ use_lma: bool = False,
+ inplace_safe: bool = False,
+ ) -> torch.Tensor:
+ "triangle! triangle!"
+ mha_inputs = {
+ "q_x": x,
+ "kv_x": x,
+ "biases": biases,
+ }
+
+ return chunk_layer(
+ partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma),
+ mha_inputs,
+ chunk_size=chunk_size,
+ no_batch_dims=len(x.shape[:-2]),
+ _out=x if inplace_safe else None,
+ )
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ mask: Optional[torch.Tensor] = None,
+ chunk_size: Optional[int] = None,
+ use_memory_efficient_kernel: bool = False,
+ use_lma: bool = False,
+ inplace_safe: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ x:
+ [*, I, J, C_in] input tensor (e.g. the pair representation)
+ Returns:
+ [*, I, J, C_in] output tensor
+ """
+ if mask is None:
+ # [*, I, J]
+ mask = x.new_ones(
+ x.shape[:-1],
+ )
+
+ if not self.starting:
+ x = x.transpose(-2, -3)
+ mask = mask.transpose(-1, -2)
+
+ # [*, I, J, C_in]
+ x = self.layer_norm(x)
+
+ # [*, I, 1, 1, J]
+ mask_bias = (self.inf * (mask - 1))[..., :, None, None, :]
+
+ # [*, H, I, J]
+ triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1))
+
+ # [*, 1, H, I, J]
+ triangle_bias = triangle_bias.unsqueeze(-4)
+
+ biases = [mask_bias, triangle_bias]
+
+ if chunk_size is not None:
+ x = self._chunk(
+ x,
+ biases,
+ chunk_size,
+ use_memory_efficient_kernel=use_memory_efficient_kernel,
+ use_lma=use_lma,
+ inplace_safe=inplace_safe,
+ )
+ else:
+ x = self.mha(
+ q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma
+ )
+
+ if not self.starting:
+ x = x.transpose(-2, -3)
+
+ return x
+
+
+class EsmFoldTriangleMultiplicativeUpdate(nn.Module):
+ """
+ Implements Algorithms 11 and 12.
+ """
+
+ def __init__(self, config, _outgoing=True):
+ super().__init__()
+ c_hidden = config.pairwise_state_dim
+ self._outgoing = _outgoing
+
+ self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden)
+ self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
+ self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden)
+ self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
+ self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init="gating")
+ self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init="final")
+
+ self.layer_norm_in = LayerNorm(c_hidden)
+ self.layer_norm_out = LayerNorm(c_hidden)
+
+ self.sigmoid = nn.Sigmoid()
+
+ def _combine_projections(
+ self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int] = None
+ ) -> torch.Tensor:
+ if self._outgoing:
+ a = permute_final_dims(a, (2, 0, 1))
+ b = permute_final_dims(b, (2, 1, 0))
+ else:
+ a = permute_final_dims(a, (2, 1, 0))
+ b = permute_final_dims(b, (2, 0, 1))
+
+ if _inplace_chunk_size is not None:
+ # To be replaced by torch vmap
+ for i in range(0, a.shape[-3], _inplace_chunk_size):
+ a_chunk = a[..., i : i + _inplace_chunk_size, :, :]
+ b_chunk = b[..., i : i + _inplace_chunk_size, :, :]
+ a[..., i : i + _inplace_chunk_size, :, :] = torch.matmul(
+ a_chunk,
+ b_chunk,
+ )
+
+ p = a
+ else:
+ p = torch.matmul(a, b)
+
+ return permute_final_dims(p, (1, 2, 0))
+
+ def _inference_forward(
+ self,
+ z: torch.Tensor,
+ mask: Optional[torch.Tensor] = None,
+ inplace_chunk_size: Optional[int] = None,
+ with_add: bool = True,
+ ):
+ """
+ Args:
+ z:
+ A [*, N, N, C_z] pair representation
+ mask:
+ A [*, N, N] pair mask
+ inplace_chunk_size:
+ Size of chunks used in the main computation. Increase to trade memory for speed.
+ with_add:
+ If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).
+ Returns:
+ A reference to the overwritten z
+
+ More memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the
+ addition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten
+ values to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.
+ Useful for inference on extremely long sequences.
+
+ It works as follows. We will make reference to variables used in the default forward implementation below.
+ Naively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the
+ "square" input tensor, 2) a, the first projection of z, 3) b, the second projection of b, 4) g, a z-sized mask,
+ and 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for
+ N=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate
+ tensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the
+ tensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over
+ pairs of chunks of z: hereafter "columns" and "rows" of z, even though each "column" and "row" in fact contains
+ inplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring
+ total memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks
+ directly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith "column" of z at
+ the end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column
+ ahead of previously overwritten columns and can be recovered directly from z. After the first iteration,
+ however, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,
+ a tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For
+ 0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith
+ iteration. Once i exceeds n/2, the cache is "reoriented" to encompass the 3rd and 4th quadrants of z instead.
+ Though the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the
+ z-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.
+ After the final iteration, z has been completely overwritten and contains the triangular multiplicative update.
+ If with_add is True, it instead contains the sum of z and the triangular multiplicative update. In either case,
+ peak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small
+ variables.
+ """
+ if mask is None:
+ mask = z.new_ones(z.shape[:-1])
+
+ mask = mask.unsqueeze(-1)
+
+ def compute_projection_helper(pair, mask, a=True):
+ if a:
+ linear_g = self.linear_a_g
+ linear_p = self.linear_a_p
+ else:
+ linear_g = self.linear_b_g
+ linear_p = self.linear_b_p
+
+ pair = self.layer_norm_in(pair)
+ p = linear_g(pair)
+ p.sigmoid_()
+ p *= linear_p(pair)
+ p *= mask
+ p = permute_final_dims(p, (2, 0, 1))
+ return p
+
+ def compute_projection(pair, mask, a=True, chunked=True):
+ need_transpose = self._outgoing ^ a
+ if not chunked:
+ p = compute_projection_helper(pair, mask, a)
+ if need_transpose:
+ p = p.transpose(-1, -2)
+ else:
+ # This computation is chunked so as not to exceed our 2.5x
+ # budget with a large intermediate tensor
+ linear_g = self.linear_a_g if a else self.linear_b_g
+ c = linear_g.bias.shape[-1]
+ out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1]
+ p = pair.new_zeros(out_shape)
+ for i in range(0, pair.shape[-3], inplace_chunk_size):
+ pair_chunk = pair[..., i : i + inplace_chunk_size, :, :]
+ pair_chunk = compute_projection_helper(
+ pair[..., i : i + inplace_chunk_size, :, :],
+ mask[..., i : i + inplace_chunk_size, :, :],
+ a,
+ )
+ if need_transpose:
+ pair_chunk = pair_chunk.transpose(-1, -2)
+ p[..., i : i + inplace_chunk_size] = pair_chunk
+ else:
+ p[..., i : i + inplace_chunk_size, :] = pair_chunk
+
+ del pair_chunk
+
+ return p
+
+ # We start by fully manifesting a. In addition to the input, this
+ # brings total memory consumption to 2x z (disregarding size of chunks)
+ # [*, N, N, c]
+ a = compute_projection(z, mask, True, chunked=True)
+
+ if inplace_chunk_size is not None:
+ n = a.shape[-1]
+ half_n = n // 2 + n % 2
+ row_dim = -3
+ col_dim = -2
+ b_chunk_dim = row_dim if self._outgoing else col_dim
+
+ def empty_slicer(t):
+ return [slice(None) for _ in t.shape]
+
+ def slice_tensor(t, start, end, dim):
+ # Slices start:end from the dim dimension of t
+ s = empty_slicer(t)
+ s[dim] = slice(start, end)
+ return t[s]
+
+ def flip_z_cache_(z_cache, z):
+ # "Reorient" the z_cache (see below), filling it with quadrants
+ # 3---recovered from the z_cache---and 4---recovered from z---
+ # of the input tensor z.
+ quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim)
+ z_cache = z_cache.transpose(row_dim, col_dim)
+
+ # If n is odd, we need to shrink the z_cache by one row
+ z_cache = z_cache[..., : (n // 2), :, :]
+
+ # Move the 3rd quadrant of z into the first half of the transposed z-cache
+ first_half_slicer = empty_slicer(z_cache)
+ first_half_slicer[col_dim] = slice(0, half_n)
+ z_cache[first_half_slicer] = quadrant_3
+
+ # Get the fourth quadrant of z
+ quadrant_4 = slice_tensor(z, half_n, None, row_dim)
+ quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim)
+
+ # Insert said quadrant into the rotated z-cache
+ quadrant_3_slicer = empty_slicer(z_cache)
+ quadrant_3_slicer[col_dim] = slice(half_n, None)
+
+ z_cache[quadrant_3_slicer] = quadrant_4
+
+ return z_cache
+
+ # Initialize the z cache to the left half of z.
+ z_cache_shape = list(z.shape)
+ z_cache_shape[col_dim] = half_n
+ z_cache = z.new_zeros(z_cache_shape)
+ z_cache_slicer = empty_slicer(z_cache)
+ z_cache_slicer[col_dim] = slice(0, half_n)
+ z_cache.copy_(z[z_cache_slicer])
+ z_cache_rotated = False
+
+ # We need to reorient the z-cache at the halfway point, and we
+ # don't want a single chunk to straddle that point. We contract one
+ # of the chunks in the middle to address that problem.
+ i_range = list(range(0, half_n, inplace_chunk_size))
+ initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])]
+ after_half = list(range(half_n, n, inplace_chunk_size))
+ after_half_offsets = [inplace_chunk_size for _ in after_half]
+ combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets)
+ for i, offset in combined_range_with_offsets:
+ if not z_cache_rotated and i >= half_n:
+ z_cache = flip_z_cache_(z_cache, z)
+ z_cache_rotated = True
+
+ z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim)
+ mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim)
+
+ z_chunk_b = z_chunk_b.clone()
+ if b_chunk_dim == col_dim:
+ z_chunk_b = slice_tensor(z, i, i + offset, col_dim)
+ else: # b_chunk_dim == row_dim
+ # In this case, the b-dimension (b_chunk_dim) is partially
+ # overwritten at the end of each iteration. We need to
+ # restore the missing component from the z-cache.
+ if not z_cache_rotated:
+ z_chunk_slicer = empty_slicer(z_chunk_b)
+ z_chunk_slicer[col_dim] = slice(0, half_n)
+ z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim)
+ else:
+ z_cache_offset = i - half_n
+ z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim)
+
+ b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False)
+ del z_chunk_b
+
+ x_chunk = torch.matmul(a, b_chunk)
+ x_chunk = permute_final_dims(x_chunk, (1, 2, 0))
+ x_chunk = self.layer_norm_out(x_chunk)
+ x_chunk = self.linear_z(x_chunk)
+
+ # The g dimension (col_dim) is parallel to and ahead of the
+ # overwrites in z. We can extract the g chunk normally.
+ z_chunk_g = slice_tensor(z, i, i + offset, col_dim)
+ g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g))
+ g_chunk.sigmoid_()
+ del z_chunk_g
+
+ x_chunk *= g_chunk
+
+ # Write the columns into z in-place
+ z_slicer = empty_slicer(z)
+ z_slicer[col_dim] = slice(i, i + offset)
+ if with_add:
+ z[z_slicer] += x_chunk
+ else:
+ z[z_slicer] = x_chunk
+ else:
+ b = compute_projection(z, mask, False, False)
+ x = torch.matmul(a, b)
+ x = self.layer_norm_out(x)
+ x = self.linear_z(x)
+ g = self.linear_g(z)
+ g.sigmoid_()
+ x *= g
+ if with_add:
+ z += x
+ else:
+ z = x
+
+ return z
+
+ def forward(
+ self,
+ z: torch.Tensor,
+ mask: Optional[torch.Tensor] = None,
+ inplace_safe: bool = False,
+ _add_with_inplace: bool = False,
+ _inplace_chunk_size: Optional[int] = 256,
+ ) -> torch.Tensor:
+ """
+ Args:
+ z:
+ [*, N_res, N_res, C_z] input tensor
+ mask:
+ [*, N_res, N_res] input mask
+ Returns:
+ [*, N_res, N_res, C_z] output tensor
+ """
+ if inplace_safe:
+ x = self._inference_forward(
+ z,
+ mask,
+ inplace_chunk_size=_inplace_chunk_size,
+ with_add=_add_with_inplace,
+ )
+ return x
+
+ if mask is None:
+ mask = z.new_ones(z.shape[:-1])
+
+ mask = mask.unsqueeze(-1)
+
+ z = self.layer_norm_in(z)
+ a = mask
+ a = a * self.sigmoid(self.linear_a_g(z))
+ a = a * self.linear_a_p(z)
+ b = mask
+ b = b * self.sigmoid(self.linear_b_g(z))
+ b = b * self.linear_b_p(z)
+
+ if is_fp16_enabled():
+ with torch.cuda.amp.autocast(enabled=False):
+ x = self._combine_projections(a.float(), b.float())
+ else:
+ x = self._combine_projections(a, b)
+
+ del a, b
+ x = self.layer_norm_out(x)
+ x = self.linear_z(x)
+ g = self.sigmoid(self.linear_g(z))
+ x = x * g
+
+ return x
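+
+# Editor's summary of the update computed above (AlphaFold2 Algorithms 11/12): after layer_norm_in and
+# masking, the gated projections are a = sigmoid(linear_a_g(z)) * linear_a_p(z) and
+# b = sigmoid(linear_b_g(z)) * linear_b_p(z). The "outgoing" variant forms p_ij = sum_k a_ik * b_jk,
+# the "incoming" variant p_ij = sum_k a_ki * b_kj (elementwise over channels), followed by
+# layer_norm_out, linear_z and the sigmoid(linear_g(z)) gate.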
+
+
+class EsmFoldPreTrainedModel(EsmPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ # Subclass `EsmPreTrainedModel` to deal with special init
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, EsmFoldLinear):
+ with torch.no_grad():
+ if module.init_fn is not None:
+ module.init_fn(module.weight, module.bias)
+ elif module.init == "default":
+ trunc_normal_init_(module.weight, scale=1.0)
+ elif module.init == "relu":
+ trunc_normal_init_(module.weight, scale=2.0)
+ elif module.init == "glorot":
+ nn.init.xavier_uniform_(module.weight, gain=1)
+ elif module.init == "gating":
+ module.weight.fill_(0.0)
+ if module.bias is not None:
+ module.bias.fill_(1.0)
+ elif module.init == "normal":
+ torch.nn.init.kaiming_normal_(module.weight, nonlinearity="linear")
+ elif module.init == "final":
+ module.weight.fill_(0.0)
+ elif isinstance(module, EsmFoldInvariantPointAttention):
+ ipa_point_weights_init_(module.head_weights)
+ elif isinstance(module, EsmFoldTriangularSelfAttentionBlock):
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight)
+ torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias)
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight)
+ torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias)
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight)
+ torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias)
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight)
+ torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias)
+
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight)
+ torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias)
+ torch.nn.init.zeros_(module.pair_to_sequence.linear.weight)
+ torch.nn.init.zeros_(module.seq_attention.o_proj.weight)
+ torch.nn.init.zeros_(module.seq_attention.o_proj.bias)
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight)
+ torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias)
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight)
+ torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias)
+ else:
+ super()._init_weights(module)
+
+
+class EsmFoldSelfAttention(nn.Module):
+ def __init__(self, embed_dim, num_heads, head_width, gated=False):
+ super().__init__()
+ assert embed_dim == num_heads * head_width
+
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.head_width = head_width
+
+ self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False)
+ self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+ self.gated = gated
+ if gated:
+ self.g_proj = nn.Linear(embed_dim, embed_dim)
+ torch.nn.init.zeros_(self.g_proj.weight)
+ torch.nn.init.ones_(self.g_proj.bias)
+
+ self.rescale_factor = self.head_width**-0.5
+
+ torch.nn.init.zeros_(self.o_proj.bias)
+
+ def forward(self, x, mask=None, bias=None, indices=None):
+ """
+ Basic self attention with optional mask and external pairwise bias. To handle sequences of different lengths,
+ use mask.
+
+ Inputs:
+ x: batch of input sequences (.. x L x C)
+ mask: batch of boolean masks where 1=valid, 0=padding position (.. x L_k)
+ bias: batch of scalar pairwise attention biases (.. x Lq x Lk x num_heads)
+
+ Outputs:
+ sequence projection (B x L x embed_dim), attention maps (B x L x L x num_heads)
+ """
+
+ t = self.proj(x).view(*x.shape[:2], self.num_heads, -1)
+ t = t.permute(0, 2, 1, 3)
+ q, k, v = t.chunk(3, dim=-1)
+
+ q = self.rescale_factor * q
+ a = torch.einsum("...qc,...kc->...qk", q, k)
+
+ # Add external attention bias.
+ if bias is not None:
+ a = a + bias.permute(0, 3, 1, 2)
+
+ # Do not attend to padding tokens.
+ if mask is not None:
+ mask = mask[:, None, None]
+ a = a.masked_fill(mask == False, -np.inf) # noqa: E712
+
+ a = nn.functional.softmax(a, dim=-1)
+
+ y = torch.einsum("...hqk,...hkc->...qhc", a, v)
+ y = y.reshape(*y.shape[:2], -1)
+
+ if self.gated:
+ y = self.g_proj(x).sigmoid() * y
+ y = self.o_proj(y)
+
+ return y, a.permute(0, 3, 1, 2)
+
+
+class EsmFoldDropout(nn.Module):
+ """
+ Implementation of dropout with the ability to share the dropout mask along a particular dimension.
+ """
+
+ def __init__(self, r: float, batch_dim: Union[int, List[int]]):
+ super().__init__()
+
+ self.r = r
+ if isinstance(batch_dim, int):
+ batch_dim = [batch_dim]
+ self.batch_dim = batch_dim
+ self.dropout = nn.Dropout(self.r)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ shape = list(x.shape)
+ if self.batch_dim is not None:
+ for bd in self.batch_dim:
+ shape[bd] = 1
+ return x * self.dropout(x.new_ones(shape))
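+
+# Editor's note: the dropout mask is sampled on a tensor whose batch_dim sizes are set to 1 and then
+# broadcast, so every index along those dimensions sees the same mask. For example,
+# EsmFoldDropout(0.1, batch_dim=2) applied to a (B, L, L, C) pairwise tensor shares the mask across
+# dimension 2, which is how row_drop/col_drop are used in EsmFoldTriangularSelfAttentionBlock below.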
+
+
+class EsmFoldSequenceToPair(nn.Module):
+ def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim):
+ super().__init__()
+
+ self.layernorm = nn.LayerNorm(sequence_state_dim)
+ self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True)
+ self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True)
+
+ torch.nn.init.zeros_(self.proj.bias)
+ torch.nn.init.zeros_(self.o_proj.bias)
+
+ def forward(self, sequence_state):
+ """
+ Inputs:
+ sequence_state: B x L x sequence_state_dim
+
+ Output:
+ pairwise_state: B x L x L x pairwise_state_dim
+
+ Intermediate state:
+ B x L x L x 2*inner_dim
+ """
+
+ assert len(sequence_state.shape) == 3
+
+ s = self.layernorm(sequence_state)
+ s = self.proj(s)
+ q, k = s.chunk(2, dim=-1)
+
+ prod = q[:, None, :, :] * k[:, :, None, :]
+ diff = q[:, None, :, :] - k[:, :, None, :]
+
+ x = torch.cat([prod, diff], dim=-1)
+ x = self.o_proj(x)
+
+ return x
+
+
+class EsmFoldPairToSequence(nn.Module):
+ def __init__(self, pairwise_state_dim, num_heads):
+ super().__init__()
+
+ self.layernorm = nn.LayerNorm(pairwise_state_dim)
+ self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False)
+
+ def forward(self, pairwise_state):
+ """
+ Inputs:
+ pairwise_state: B x L x L x pairwise_state_dim
+
+ Output:
+ pairwise_bias: B x L x L x num_heads
+ """
+ assert len(pairwise_state.shape) == 4
+ z = self.layernorm(pairwise_state)
+ pairwise_bias = self.linear(z)
+ return pairwise_bias
+
+
+class EsmFoldResidueMLP(nn.Module):
+ def __init__(self, embed_dim, inner_dim, dropout=0):
+ super().__init__()
+
+ self.mlp = nn.Sequential(
+ nn.LayerNorm(embed_dim),
+ nn.Linear(embed_dim, inner_dim),
+ nn.ReLU(),
+ nn.Linear(inner_dim, embed_dim),
+ nn.Dropout(dropout),
+ )
+
+ def forward(self, x):
+ return x + self.mlp(x)
+
+
+class EsmFoldTriangularSelfAttentionBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ sequence_state_dim = config.sequence_state_dim
+ pairwise_state_dim = config.pairwise_state_dim
+ sequence_num_heads = sequence_state_dim // config.sequence_head_width
+ pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width
+
+ self.layernorm_1 = nn.LayerNorm(sequence_state_dim)
+
+ self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim)
+ self.pair_to_sequence = EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads)
+
+ self.seq_attention = EsmFoldSelfAttention(
+ sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True
+ )
+ self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True)
+ self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False)
+
+ self.tri_att_start = EsmFoldTriangleAttention(
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=True
+ )
+ self.tri_att_end = EsmFoldTriangleAttention(
+ pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1e9, starting=False
+ )
+
+ self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout)
+ self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout)
+
+ self.drop = nn.Dropout(config.dropout)
+ self.row_drop = EsmFoldDropout(config.dropout * 2, 2)
+ self.col_drop = EsmFoldDropout(config.dropout * 2, 1)
+
+ def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs):
+ """
+ Inputs:
+ sequence_state: B x L x sequence_state_dim
+ pairwise_state: B x L x L x pairwise_state_dim
+ mask: B x L boolean tensor of valid positions
+
+ Output:
+ sequence_state: B x L x sequence_state_dim
+ pairwise_state: B x L x L x pairwise_state_dim
+ """
+ if len(sequence_state.shape) != 3:
+ raise ValueError(f"`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.")
+ if len(pairwise_state.shape) != 4:
+ raise ValueError(f"`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.")
+ if mask is not None and len(mask.shape) != 2:
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
+
+ batch_dim, seq_dim, sequence_state_dim = sequence_state.shape
+ pairwise_state_dim = pairwise_state.shape[3]
+
+ if sequence_state_dim != self.config.sequence_state_dim:
+ raise ValueError(
+ "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got "
+ f"{sequence_state_dim} != {self.config.sequence_state_dim}."
+ )
+ if pairwise_state_dim != self.config.pairwise_state_dim:
+ raise ValueError(
+ "`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. Got "
+ f"{pairwise_state_dim} != {self.config.pairwise_state_dim}."
+ )
+ if batch_dim != pairwise_state.shape[0]:
+ raise ValueError(
+ f"`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != "
+ f"{pairwise_state.shape[0]}."
+ )
+ if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]:
+ raise ValueError(
+ f"`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != "
+ f"{pairwise_state.shape[1]} or {pairwise_state.shape[2]}."
+ )
+
+ # Update sequence state
+ bias = self.pair_to_sequence(pairwise_state)
+
+ # Self attention with bias + mlp.
+ y = self.layernorm_1(sequence_state)
+ y, _ = self.seq_attention(y, mask=mask, bias=bias)
+ sequence_state = sequence_state + self.drop(y)
+ sequence_state = self.mlp_seq(sequence_state)
+
+ # Update pairwise state
+ pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state)
+
+ # Axial attention with triangular bias.
+ tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None
+ pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask))
+ pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask))
+ pairwise_state = pairwise_state + self.row_drop(
+ self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
+ )
+ pairwise_state = pairwise_state + self.col_drop(
+ self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)
+ )
+
+ # MLP over pairs.
+ pairwise_state = self.mlp_pair(pairwise_state)
+
+ return sequence_state, pairwise_state
+
+
+class EsmCategoricalMixture:
+ def __init__(self, param, bins=50, start=0, end=1):
+ # All tensors are of shape ..., bins.
+ self.logits = param
+ bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype)
+ self.v_bins = (bins[:-1] + bins[1:]) / 2
+
+ def log_prob(self, true):
+ # Shapes are:
+ # self.logits: ... x bins
+ # true : ...
+ true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1)
+ nll = self.logits.log_softmax(-1)
+ return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1)
+
+ def mean(self):
+ return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
+
+
+def categorical_lddt(logits, bins=50):
+ # Logits are ..., 37, bins.
+ return EsmCategoricalMixture(logits, bins=bins).mean()
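+
+# Editor's illustration: EsmCategoricalMixture treats the last dimension as logits over `bins` equal-width
+# bins on [start, end], and .mean() returns the probability-weighted average of the bin centers. For the
+# lddt head, logits of shape (..., 37, 50) therefore reduce to per-atom confidence values in (0, 1).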
+
+
+def get_axial_mask(mask):
+ """
+ Helper to convert B x L mask of valid positions to axial mask used in row column attentions.
+
+ Input:
+ mask: B x L tensor of booleans
+
+ Output:
+ mask: (B * L) x L tensor of booleans (the query/row dimension is folded into the batch dimension)
+ """
+
+ if mask is None:
+ return None
+
+ if len(mask.shape) != 2:
+ raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
+ batch_dim, seq_dim = mask.shape
+ m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim)
+ m = m.reshape(batch_dim * seq_dim, seq_dim)
+ return m
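+
+# Editor's illustration: a mask of shape (2, 3) is expanded to (2, 3, 3) and reshaped to (6, 3), i.e. one
+# key-validity row per (batch, query position) pair, matching the flattened layout used by row/column
+# attention.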
+
+
+class EsmFoldRelativePosition(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.bins = config.position_bins
+
+ # Note an additional offset is used so that the 0th position
+ # is reserved for masked pairs.
+ self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim)
+
+ def forward(self, residue_index, mask=None):
+ """
+ Input:
+ residue_index: B x L tensor of indices (dtype=torch.long)
+ mask: B x L tensor of booleans
+
+ Output:
+ pairwise_state: B x L x L x pairwise_state_dim tensor of embeddings
+ """
+ if residue_index.dtype != torch.long:
+ raise ValueError(f"`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.")
+ if mask is not None and residue_index.shape != mask.shape:
+ raise ValueError(
+ f"`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}."
+ )
+
+ diff = residue_index[:, None, :] - residue_index[:, :, None]
+ diff = diff.clamp(-self.bins, self.bins)
+ diff = diff + self.bins + 1 # Add 1 to adjust for padding index.
+
+ if mask is not None:
+ mask = mask[:, None, :] * mask[:, :, None]
+ diff[mask == False] = 0 # noqa: E712
+
+ output = self.embedding(diff)
+ return output
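+
+# Editor's trace: with position_bins=32, residue_index = [[0, 1, 5]] yields pairwise offsets
+# [[0, 1, 5], [-1, 0, 4], [-5, -4, 0]], which are clamped to [-32, 32] and shifted by bins + 1 = 33
+# before the embedding lookup; masked pairs are mapped to index 0.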
+
+
+class EsmFoldAngleResnetBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="relu")
+ self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init="final")
+
+ self.relu = nn.ReLU()
+
+ def forward(self, a: torch.Tensor) -> torch.Tensor:
+ s_initial = a
+
+ a = self.relu(a)
+ a = self.linear_1(a)
+ a = self.relu(a)
+ a = self.linear_2(a)
+
+ return a + s_initial
+
+
+class EsmFoldAngleResnet(nn.Module):
+ """
+ Implements Algorithm 20, lines 11-14
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
+ self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim)
+
+ self.layers = nn.ModuleList()
+ for _ in range(config.num_resnet_blocks):
+ layer = EsmFoldAngleResnetBlock(config)
+ self.layers.append(layer)
+
+ self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2)
+
+ self.relu = nn.ReLU()
+
+ def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Args:
+ s:
+ [*, C_hidden] single embedding
+ s_initial:
+ [*, C_hidden] single embedding as of the start of the StructureModule
+ Returns:
+ [*, no_angles, 2] predicted angles
+ """
+ # NOTE: The ReLU's applied to the inputs are absent from the supplement
+ # pseudocode but present in the source. For maximal compatibility with
+ # the pretrained weights, I'm going with the source.
+
+ # [*, C_hidden]
+ s_initial = self.relu(s_initial)
+ s_initial = self.linear_initial(s_initial)
+ s = self.relu(s)
+ s = self.linear_in(s)
+ s = s + s_initial
+
+ for l in self.layers:
+ s = l(s)
+
+ s = self.relu(s)
+
+ # [*, no_angles * 2]
+ s = self.linear_out(s)
+
+ # [*, no_angles, 2]
+ s = s.view(s.shape[:-1] + (-1, 2))
+
+ unnormalized_s = s
+ norm_denom = torch.sqrt(
+ torch.clamp(
+ torch.sum(s**2, dim=-1, keepdim=True),
+ min=self.config.epsilon,
+ )
+ )
+ s = s / norm_denom
+
+ return unnormalized_s, s
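+
+# Editor's note: the final view above groups the 2 * num_angles outputs into pairs of shape
+# [*, num_angles, 2]; `unnormalized_s` keeps the raw values while `s` is divided by its L2 norm over the
+# last dimension (the squared norm is clamped below by config.epsilon), so each angle is returned as a
+# unit-norm (sin, cos)-style pair.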
+
+
+class EsmFoldInvariantPointAttention(nn.Module):
+ """
+ Implements Algorithm 22.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ c_s = config.sequence_dim
+ c_z = config.pairwise_dim
+ self.hidden_dim = config.ipa_dim
+ self.num_heads = config.num_heads_ipa
+ self.num_qk_points = config.num_qk_points
+ self.num_v_points = config.num_v_points
+
+ # These linear layers differ from their specifications in the
+ # supplement. There, they lack bias and use Glorot initialization.
+ # Here as in the official source, they have bias and use the default
+ # Lecun initialization.
+ hc = config.ipa_dim * config.num_heads_ipa
+ self.linear_q = EsmFoldLinear(c_s, hc)
+ self.linear_kv = EsmFoldLinear(c_s, 2 * hc)
+
+ hpq = config.num_heads_ipa * config.num_qk_points * 3
+ self.linear_q_points = EsmFoldLinear(c_s, hpq)
+
+ hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3
+ self.linear_kv_points = EsmFoldLinear(c_s, hpkv)
+
+ self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa)
+
+ self.head_weights = nn.Parameter(torch.zeros((config.num_heads_ipa)))
+
+ concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4)
+ self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init="final")
+
+ self.softmax = nn.Softmax(dim=-1)
+ self.softplus = nn.Softplus()
+
+ def forward(
+ self,
+ s: torch.Tensor,
+ z: Optional[torch.Tensor],
+ r: Rigid,
+ mask: torch.Tensor,
+ _offload_inference: bool = False,
+ _z_reference_list: Optional[Sequence[torch.Tensor]] = None,
+ ) -> torch.Tensor:
+ """
+ Args:
+ s:
+ [*, N_res, C_s] single representation
+ z:
+ [*, N_res, N_res, C_z] pair representation
+ r:
+ [*, N_res] transformation object
+ mask:
+ [*, N_res] mask
+ Returns:
+ [*, N_res, C_s] single representation update
+ """
+ z = [z]
+
+ #######################################
+ # Generate scalar and point activations
+ #######################################
+ # [*, N_res, H * C_hidden]
+ q = self.linear_q(s)
+ kv = self.linear_kv(s)
+
+ # [*, N_res, H, C_hidden]
+ q = q.view(q.shape[:-1] + (self.num_heads, -1))
+
+ # [*, N_res, H, 2 * C_hidden]
+ kv = kv.view(kv.shape[:-1] + (self.num_heads, -1))
+
+ # [*, N_res, H, C_hidden]
+ k, v = torch.split(kv, self.hidden_dim, dim=-1)
+
+ # [*, N_res, H * P_q * 3]
+ q_pts = self.linear_q_points(s)
+
+ # This is kind of clunky, but it's how the original does it
+ # [*, N_res, H * P_q, 3]
+ q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1)
+ q_pts = torch.stack(q_pts, dim=-1)
+ q_pts = r[..., None].apply(q_pts)
+
+ # [*, N_res, H, P_q, 3]
+ q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3))
+
+ # [*, N_res, H * (P_q + P_v) * 3]
+ kv_pts = self.linear_kv_points(s)
+
+ # [*, N_res, H * (P_q + P_v), 3]
+ kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1)
+ kv_pts = torch.stack(kv_pts, dim=-1)
+ kv_pts = r[..., None].apply(kv_pts)
+
+ # [*, N_res, H, (P_q + P_v), 3]
+ kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3))
+
+ # [*, N_res, H, P_q/P_v, 3]
+ k_pts, v_pts = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2)
+
+ ##########################
+ # Compute attention scores
+ ##########################
+ # [*, N_res, N_res, H]
+ b = self.linear_b(z[0])
+
+ if _offload_inference:
+ assert sys.getrefcount(z[0]) == 2
+ z[0] = z[0].cpu()
+
+ # [*, H, N_res, N_res]
+ if is_fp16_enabled():
+ with torch.cuda.amp.autocast(enabled=False):
+ a = torch.matmul(
+ permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden]
+ permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res]
+ )
+ else:
+ a = torch.matmul(
+ permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]
+ permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res]
+ )
+
+ a *= math.sqrt(1.0 / (3 * self.hidden_dim))
+ a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))
+
+ # [*, N_res, N_res, H, P_q, 3]
+ pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5)
+ pt_att = pt_att**2
+
+ # [*, N_res, N_res, H, P_q]
+ pt_att = sum(torch.unbind(pt_att, dim=-1))
+ head_weights = self.softplus(self.head_weights).view(*((1,) * len(pt_att.shape[:-2]) + (-1, 1)))
+ head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2)))
+ pt_att = pt_att * head_weights
+
+ # [*, N_res, N_res, H]
+ pt_att = torch.sum(pt_att, dim=-1) * (-0.5)
+ # [*, N_res, N_res]
+ square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)
+ square_mask = self.config.inf * (square_mask - 1)
+
+ # [*, H, N_res, N_res]
+ pt_att = permute_final_dims(pt_att, (2, 0, 1))
+
+ a = a + pt_att
+ a = a + square_mask.unsqueeze(-3)
+ a = self.softmax(a)
+
+ ################
+ # Compute output
+ ################
+ # [*, N_res, H, C_hidden]
+ o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3)
+
+ # [*, N_res, H * C_hidden]
+ o = flatten_final_dims(o, 2)
+
+ # [*, H, 3, N_res, P_v]
+ o_pt = torch.sum(
+ (a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :]),
+ dim=-2,
+ )
+
+ # [*, N_res, H, P_v, 3]
+ o_pt = permute_final_dims(o_pt, (2, 0, 3, 1))
+ o_pt = r[..., None, None].invert_apply(o_pt)
+
+ # [*, N_res, H * P_v]
+ o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt**2, dim=-1) + self.config.epsilon), 2)
+
+ # [*, N_res, H * P_v, 3]
+ o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3)
+
+ if _offload_inference:
+ z[0] = z[0].to(o_pt.device)
+
+ # [*, N_res, H, C_z]
+ o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype))
+
+ # [*, N_res, H * C_z]
+ o_pair = flatten_final_dims(o_pair, 2)
+
+ # [*, N_res, C_s]
+ s = self.linear_out(
+ torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype)
+ )
+
+ return s
+
+
+class EsmFoldBackboneUpdate(nn.Module):
+ """
+ Implements part of Algorithm 23.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.linear = EsmFoldLinear(config.sequence_dim, 6, init="final")
+
+ def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Args:
+ s:
+ [*, N_res, C_s] single representation
+ Returns:
+ [*, N_res, 6] update vector
+ """
+ # [*, 6]
+ update = self.linear(s)
+
+ return update
+
+
+class EsmFoldStructureModuleTransitionLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
+ self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="relu")
+ self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init="final")
+
+ self.relu = nn.ReLU()
+
+ def forward(self, s):
+ s_initial = s
+ s = self.linear_1(s)
+ s = self.relu(s)
+ s = self.linear_2(s)
+ s = self.relu(s)
+ s = self.linear_3(s)
+
+ s = s + s_initial
+
+ return s
+
+
+class EsmFoldStructureModuleTransition(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.layers = nn.ModuleList()
+ for _ in range(config.num_transition_layers):
+ l = EsmFoldStructureModuleTransitionLayer(config)
+ self.layers.append(l)
+
+ self.dropout = nn.Dropout(config.dropout_rate)
+ self.layer_norm = LayerNorm(config.sequence_dim)
+
+ def forward(self, s):
+ for l in self.layers:
+ s = l(s)
+
+ s = self.dropout(s)
+ s = self.layer_norm(s)
+
+ return s
+
+
+class EsmFoldStructureModule(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ # Buffers to be lazily initialized later
+ # self.default_frames
+ # self.group_idx
+ # self.atom_mask
+ # self.lit_positions
+
+ self.layer_norm_s = LayerNorm(config.sequence_dim)
+ self.layer_norm_z = LayerNorm(config.pairwise_dim)
+
+ self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim)
+
+ self.ipa = EsmFoldInvariantPointAttention(config)
+
+ self.ipa_dropout = nn.Dropout(config.dropout_rate)
+ self.layer_norm_ipa = LayerNorm(config.sequence_dim)
+
+ self.transition = EsmFoldStructureModuleTransition(config)
+ self.bb_update = EsmFoldBackboneUpdate(config)
+ self.angle_resnet = EsmFoldAngleResnet(config)
+
+ def forward(
+ self,
+ evoformer_output_dict,
+ aatype,
+ mask=None,
+ _offload_inference=False,
+ ):
+ """
+ Args:
+ evoformer_output_dict:
+ Dictionary containing:
+ "single":
+ [*, N_res, C_s] single representation
+ "pair":
+ [*, N_res, N_res, C_z] pair representation
+ aatype:
+ [*, N_res] amino acid indices
+ mask:
+ Optional [*, N_res] sequence mask
+ Returns:
+ A dictionary of outputs
+ """
+ s = evoformer_output_dict["single"]
+
+ if mask is None:
+ # [*, N]
+ mask = s.new_ones(s.shape[:-1])
+
+ # [*, N, C_s]
+ s = self.layer_norm_s(s)
+
+ # [*, N, N, C_z]
+ z = self.layer_norm_z(evoformer_output_dict["pair"])
+
+ z_reference_list = None
+ if _offload_inference:
+ assert sys.getrefcount(evoformer_output_dict["pair"]) == 2
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu()
+ z_reference_list = [z]
+ z = None
+
+ # [*, N, C_s]
+ s_initial = s
+ s = self.linear_in(s)
+
+ # [*, N]
+ rigids = Rigid.identity(
+ s.shape[:-1],
+ s.dtype,
+ s.device,
+ self.training,
+ fmt="quat",
+ )
+ outputs = []
+ for i in range(self.config.num_blocks):
+ # [*, N, C_s]
+ s = s + self.ipa(
+ s,
+ z,
+ rigids,
+ mask,
+ _offload_inference=_offload_inference,
+ _z_reference_list=z_reference_list,
+ )
+ s = self.ipa_dropout(s)
+ s = self.layer_norm_ipa(s)
+ s = self.transition(s)
+
+ # [*, N]
+ rigids = rigids.compose_q_update_vec(self.bb_update(s))
+
+ # To hew as closely as possible to AlphaFold, we convert our
+ # quaternion-based transformations to rotation-matrix ones
+ # here
+ backb_to_global = Rigid(
+ Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None),
+ rigids.get_trans(),
+ )
+
+ backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)
+
+ # [*, N, 7, 2]
+ unnormalized_angles, angles = self.angle_resnet(s, s_initial)
+
+ all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)
+
+ pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
+
+ scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)
+
+ preds = {
+ "frames": scaled_rigids.to_tensor_7(),
+ "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
+ "unnormalized_angles": unnormalized_angles,
+ "angles": angles,
+ "positions": pred_xyz,
+ "states": s,
+ }
+
+ outputs.append(preds)
+
+ rigids = rigids.stop_rot_gradient()
+
+ del z, z_reference_list
+
+ if _offload_inference:
+ evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device)
+
+ outputs = dict_multimap(torch.stack, outputs)
+ outputs["single"] = s
+
+ return outputs
+
+ def _init_residue_constants(self, float_dtype, device):
+ if not hasattr(self, "default_frames"):
+ self.register_buffer(
+ "default_frames",
+ torch.tensor(
+ residue_constants.restype_rigid_group_default_frame,
+ dtype=float_dtype,
+ device=device,
+ requires_grad=False,
+ ),
+ persistent=False,
+ )
+ if not hasattr(self, "group_idx"):
+ self.register_buffer(
+ "group_idx",
+ torch.tensor(
+ residue_constants.restype_atom14_to_rigid_group,
+ device=device,
+ requires_grad=False,
+ ),
+ persistent=False,
+ )
+ if not hasattr(self, "atom_mask"):
+ self.register_buffer(
+ "atom_mask",
+ torch.tensor(
+ residue_constants.restype_atom14_mask,
+ dtype=float_dtype,
+ device=device,
+ requires_grad=False,
+ ),
+ persistent=False,
+ )
+ if not hasattr(self, "lit_positions"):
+ self.register_buffer(
+ "lit_positions",
+ torch.tensor(
+ residue_constants.restype_atom14_rigid_group_positions,
+ dtype=float_dtype,
+ device=device,
+ requires_grad=False,
+ ),
+ persistent=False,
+ )
+
+ def torsion_angles_to_frames(self, r, alpha, f):
+ # Lazily initialize the residue constants on the correct device
+ self._init_residue_constants(alpha.dtype, alpha.device)
+ # Separated purely to make testing less annoying
+ return torsion_angles_to_frames(r, alpha, f, self.default_frames)
+
+ def frames_and_literature_positions_to_atom14_pos(self, r, f): # [*, N, 8] # [*, N]
+ # Lazily initialize the residue constants on the correct device
+ self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
+ return frames_and_literature_positions_to_atom14_pos(
+ r,
+ f,
+ self.default_frames,
+ self.group_idx,
+ self.atom_mask,
+ self.lit_positions,
+ )
+
+
+class EsmFoldingTrunk(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ c_s = config.sequence_state_dim
+ c_z = config.pairwise_state_dim
+
+ self.pairwise_positional_embedding = EsmFoldRelativePosition(config)
+
+ self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)])
+
+ self.recycle_bins = 15
+ self.recycle_s_norm = nn.LayerNorm(c_s)
+ self.recycle_z_norm = nn.LayerNorm(c_z)
+ self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
+ self.recycle_disto.weight[0].detach().zero_()
+
+ self.structure_module = EsmFoldStructureModule(config.structure_module)
+ self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim)
+ self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim)
+
+ self.chunk_size = config.chunk_size
+
+ def set_chunk_size(self, chunk_size):
+ # This parameter means the axial attention will be computed
+ # in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
+ # It's equivalent to running a for loop over chunks of the dimension we're iterating over,
+ # where chunk_size is the size of each chunk, so a value of 128 means the computation is done
+ # in 128-length chunks.
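+ # A rough sketch of the idea (illustrative only, not the actual kernel used by the blocks): chunking
+ # the query dimension of an attention computation is exact, because softmax is applied row-wise.
+ #
+ #     def chunked_attention(q, k, v, chunk_size):
+ #         out = []
+ #         for q_chunk in q.split(chunk_size, dim=-2):
+ #             scores = q_chunk @ k.transpose(-1, -2) / k.shape[-1] ** 0.5
+ #             out.append(scores.softmax(dim=-1) @ v)
+ #         return torch.cat(out, dim=-2)
+ #
+ # Only chunk_size rows of the L x L attention matrix exist at any one time, so peak memory scales
+ # roughly with O(L * chunk_size) rather than O(L^2).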
+ self.chunk_size = chunk_size
+
+ def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
+ """
+ Inputs:
+ seq_feats: B x L x C tensor of sequence features pair_feats: B x L x L x C tensor of pair features residx: B
+ x L long tensor giving the position in the sequence mask: B x L boolean tensor indicating valid residues
+
+ Output:
+ predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object
+ """
+
+ device = seq_feats.device
+ s_s_0 = seq_feats
+ s_z_0 = pair_feats
+
+ if no_recycles is None:
+ no_recycles = self.config.max_recycles
+ else:
+ if no_recycles < 0:
+ raise ValueError("Number of recycles must not be negative.")
+ no_recycles += 1 # First 'recycle' is just the standard forward pass through the model.
+
+ def trunk_iter(s, z, residx, mask):
+ z = z + self.pairwise_positional_embedding(residx, mask=mask)
+
+ for block in self.blocks:
+ s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size)
+ return s, z
+
+ s_s = s_s_0
+ s_z = s_z_0
+ recycle_s = torch.zeros_like(s_s)
+ recycle_z = torch.zeros_like(s_z)
+ recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)
+
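+ # Only the final recycling iteration tracks gradients; earlier iterations run under torch.no_grad().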
+ for recycle_idx in range(no_recycles):
+ with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]):
+ # === Recycling ===
+ recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device)
+ recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device)
+ recycle_z += self.recycle_disto(recycle_bins.detach()).to(device)
+
+ s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask)
+
+ # === Structure module ===
+ structure = self.structure_module(
+ {"single": self.trunk2sm_s(s_s), "pair": self.trunk2sm_z(s_z)},
+ true_aa,
+ mask.float(),
+ )
+
+ recycle_s = s_s
+ recycle_z = s_z
+ # The distogram needs the N, CA, C coordinates, and uses the same bin constants as AlphaFold.
+ recycle_bins = EsmFoldingTrunk.distogram(
+ structure["positions"][-1][:, :, :3],
+ 3.375,
+ 21.375,
+ self.recycle_bins,
+ )
+
+ structure["s_s"] = s_s
+ structure["s_z"] = s_z
+
+ return structure
+
+ @staticmethod
+ def distogram(coords, min_bin, max_bin, num_bins):
+ # Coords are [... L x 3 x 3], where it's [N, CA, C] x 3 coordinates.
+ boundaries = torch.linspace(
+ min_bin,
+ max_bin,
+ num_bins - 1,
+ device=coords.device,
+ )
+ boundaries = boundaries**2
+ N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
+ # Infer CB coordinates.
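+ # The fixed coefficients below reconstruct an idealized virtual C-beta position from the N, CA and C
+ # atoms (the AlphaFold-style pseudo-beta construction), so glycines also get a CB for the distogram.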
+ b = CA - N
+ c = C - CA
+ a = b.cross(c, dim=-1)
+ CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
+ dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True)
+ bins = torch.sum(dists > boundaries, dim=-1) # [..., L, L]
+ return bins
+
+
+# TODO Add information to the docstring about any methods that convert to PDB format, or otherwise prepare
+# the outputs for downstream use.
+
+
+@add_start_docstrings(
+ """
+ ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed
+ by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to
+ the rest of the model combined! It outputs a dictionary containing predicted structural information about the input
+ protein(s).
+ """,
+ ESM_START_DOCSTRING,
+)
+class EsmForProteinFolding(EsmPreTrainedModel):
+ _no_split_modules = ["EsmFoldStructureModule", "EsmFoldTriangularSelfAttentionBlock"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.config = config
+
+ self.distogram_bins = 64
+
+ self.esm = EsmModel(config, add_pooling_layer=False)
+
+ self.esm.requires_grad_(False)
+ if self.config.esmfold_config.fp16_esm:
+ self.esm.half()
+
+ self.esm_feats = self.config.hidden_size
+ self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads
+ self.esm_layers = self.config.num_hidden_layers
+ self.register_buffer("af2_to_esm", self._af2_to_esm_from_vocab_list(config.vocab_list))
+ self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1))
+
+ trunk_config = self.config.esmfold_config.trunk
+ c_s = trunk_config.sequence_state_dim
+ c_z = trunk_config.pairwise_state_dim
+ self.esm_s_mlp = nn.Sequential(
+ LayerNorm(self.esm_feats),
+ nn.Linear(self.esm_feats, c_s),
+ nn.ReLU(),
+ nn.Linear(c_s, c_s),
+ )
+
+ # 0 is padding, N is unknown residues, N + 1 is mask.
+ self.n_tokens_embed = residue_constants.restype_num + 3
+ self.pad_idx = 0
+ self.unk_idx = self.n_tokens_embed - 2
+ self.mask_idx = self.n_tokens_embed - 1
+ self.esm_dict_cls_idx = self.config.vocab_list.index("<cls>")
+ self.esm_dict_mask_idx = self.config.vocab_list.index("<mask>")
+ self.esm_dict_eos_idx = self.config.vocab_list.index("<eos>")
+ self.esm_dict_padding_idx = self.config.vocab_list.index("<pad>")
+ if self.config.esmfold_config.embed_aa:
+ self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0)
+
+ self.trunk = EsmFoldingTrunk(trunk_config)
+
+ self.distogram_head = nn.Linear(c_z, self.distogram_bins)
+ self.ptm_head = nn.Linear(c_z, self.distogram_bins)
+ self.lm_head = nn.Linear(c_s, self.n_tokens_embed)
+ self.lddt_bins = 50
+ structure_module_config = trunk_config.structure_module
+ self.lddt_head = nn.Sequential(
+ nn.LayerNorm(structure_module_config.sequence_dim),
+ nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim),
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim),
+ nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins),
+ )
+
+ @staticmethod
+ def _af2_to_esm_from_vocab_list(vocab_list: List[str]) -> torch.Tensor:
+ # Remember that t is shifted from residue_constants by 1 (0 is padding).
+ esm_reorder = [vocab_list.index("<pad>")] + [vocab_list.index(v) for v in residue_constants.restypes_with_x]
+ return torch.tensor(esm_reorder)
+
+ @add_start_docstrings_to_model_forward(ESMFOLD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=EsmForProteinFoldingOutput, config_class=EsmConfig)
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ masking_pattern: Optional[torch.Tensor] = None,
+ num_recycles: Optional[int] = None,
+ ) -> EsmForProteinFoldingOutput:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, EsmForProteinFolding
+
+ >>> model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
+ >>> inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False) # A tiny random peptide
+ >>> outputs = model(**inputs)
+ >>> folded_positions = outputs.positions
+ ```
+
+ """
+ cfg = self.config.esmfold_config
+
+ aa = input_ids # B x L
+ B = aa.shape[0]
+ L = aa.shape[1]
+ device = input_ids.device
+ if attention_mask is None:
+ attention_mask = torch.ones_like(aa, device=device)
+ if position_ids is None:
+ position_ids = torch.arange(L, device=device).expand_as(input_ids)
+
+ # === ESM ===
+ esmaa = self.af2_idx_to_esm_idx(aa, attention_mask)
+
+ if masking_pattern is not None:
+ masked_aa, esmaa, mlm_targets = self.bert_mask(aa, esmaa, attention_mask, masking_pattern)
+ else:
+ masked_aa = aa
+ mlm_targets = None
+
+ # We get sequence and pair representations from whatever version of ESM /
+ # configuration we are using. The sequence representation esm_s is always
+ # present. The pair embedding esm_z may be present depending on the
+ # configuration of the model. If esm_z is not used by the model then it
+ # is returned as None here.
+ esm_s = self.compute_language_model_representations(esmaa)
+
+ # Convert esm_s and esm_z, if present, to the precision used by the trunk and
+ # the structure module. These tensors may be a lower precision if, for example,
+ # we're running the language model in fp16 precision.
+ esm_s = esm_s.to(self.esm_s_combine.dtype)
+
+ if cfg.esm_ablate_sequence:
+ esm_s = esm_s * 0
+
+ esm_s = esm_s.detach()
+
+ # === preprocessing ===
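+ # Collapse the stacked per-layer ESM hidden states into a single [B, L, C] representation using a
+ # learned softmax-weighted average over the layer dimension.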
+ esm_s = (self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2)
+ s_s_0 = self.esm_s_mlp(esm_s)
+
+ s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim)
+
+ if self.config.esmfold_config.embed_aa:
+ s_s_0 += self.embedding(masked_aa)
+
+ structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
+ # Documenting what we expect:
+ structure = {
+ k: v
+ for k, v in structure.items()
+ if k
+ in [
+ "s_z",
+ "s_s",
+ "frames",
+ "sidechain_frames",
+ "unnormalized_angles",
+ "angles",
+ "positions",
+ "states",
+ ]
+ }
+
+ # Add BERT mask for the loss to use, if available.
+ if mlm_targets is not None:
+ structure["mlm_targets"] = mlm_targets
+
+ disto_logits = self.distogram_head(structure["s_z"])
+ disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2
+ structure["distogram_logits"] = disto_logits
+
+ lm_logits = self.lm_head(structure["s_s"])
+ structure["lm_logits"] = lm_logits
+
+ structure["aatype"] = aa
+ make_atom14_masks(structure)
+ # make_atom14_masks doesn't know about the true padding mask, so we apply it to the atom-existence
+ # tensors here. The index-conversion tensors are left unmasked:
+ # "residx_atom14_to_atom37",
+ # "residx_atom37_to_atom14",
+ for k in [
+ "atom14_atom_exists",
+ "atom37_atom_exists",
+ ]:
+ structure[k] *= attention_mask.unsqueeze(-1)
+ structure["residue_index"] = position_ids
+
+ lddt_head = self.lddt_head(structure["states"]).reshape(structure["states"].shape[0], B, L, -1, self.lddt_bins)
+ structure["lddt_head"] = lddt_head
+ plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins)
+ structure["plddt"] = plddt
+
+ ptm_logits = self.ptm_head(structure["s_z"])
+ structure["ptm_logits"] = ptm_logits
+ structure["ptm"] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins)
+ structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins))
+
+ return EsmForProteinFoldingOutput(**structure)
+
+ def af2_idx_to_esm_idx(self, aa, mask):
+ # avoid indexing on different devices
+ if self.af2_to_esm.device != aa.device:
+ self.af2_to_esm = self.af2_to_esm.to(aa.device)
+ aa = (aa + 1).masked_fill(mask != 1, 0)
+ return self.af2_to_esm[aa]
+
+ def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor:
+ device = next(self.parameters()).device
+ B, L = esmaa.shape # B = batch size, L = sequence length.
+
+ if self.config.esmfold_config.bypass_lm:
+ esm_s = torch.zeros(B, L, self.esm_s_combine.size(0), self.esm_feats, device=device)
+ return esm_s
+
+ bosi, eosi = self.esm_dict_cls_idx, self.esm_dict_eos_idx
+ bos = esmaa.new_full((B, 1), bosi)
+ eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx)
+ esmaa = torch.cat([bos, esmaa, eos], dim=1)
+ # Use the first padding index as eos during inference.
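+ # (esmaa != 1).sum(1) counts the non-padding tokens in each row (padding_idx is 1 in the ESM vocab,
+ # as the `esmaa != 1` masks in this function assume), i.e. the index of the first padding slot, so
+ # the EOS token is written immediately after each sequence.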
+ esmaa[range(B), (esmaa != 1).sum(1)] = eosi
+
+ # _, esm_z, esm_s = self.esm(esmaa, return_pairs=self.config.esmfold_config.use_esm_attn_map)
+ # Because we do not support use_esm_attn_map in the HF port as it is not used in any public models,
+ # esm_z is always None
+ esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)["hidden_states"]
+ esm_s = torch.stack(esm_hidden_states, dim=2)
+
+ esm_s = esm_s[:, 1:-1] # B, L, nLayers, C
+
+ return esm_s
+
+ def bert_mask(self, aa, esmaa, mask, pattern):
+ new_aa = aa.clone()
+ target = aa.clone()
+ new_esmaa = esmaa.clone()
+ new_aa[pattern == 1] = self.mask_idx
+ target[pattern != 1] = 0
+ new_esmaa[pattern == 1] = self.esm_dict_mask_idx
+ return new_aa, new_esmaa, target
+
+ @torch.no_grad()
+ def infer(
+ self,
+ seqs: Union[str, List[str]],
+ position_ids=None,
+ ):
+ if isinstance(seqs, str):
+ lst = [seqs]
+ else:
+ lst = seqs
+ # Returns the raw outputs of the model given an input sequence.
+ device = next(self.parameters()).device
+ aatype = collate_dense_tensors(
+ [
+ torch.from_numpy(
+ residue_constants.sequence_to_onehot(
+ sequence=seq,
+ mapping=residue_constants.restype_order_with_x,
+ map_unknown_to_x=True,
+ )
+ )
+ .to(device)
+ .argmax(dim=1)
+ for seq in lst
+ ]
+ ) # B=1 x L
+ mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst])
+ position_ids = (
+ torch.arange(aatype.shape[1], device=device).expand(len(lst), -1)
+ if position_ids is None
+ else position_ids.to(device)
+ )
+ if position_ids.ndim == 1:
+ position_ids = position_ids.unsqueeze(0)
+ return self.forward(
+ aatype,
+ mask,
+ position_ids=position_ids,
+ )
+
+ @staticmethod
+ def output_to_pdb(output: Dict) -> List[str]:
+ """Returns the pbd (file) string from the model given the model output."""
+ output = {k: v.to("cpu").numpy() for k, v in output.items()}
+ pdbs = []
+ final_atom_positions = atom14_to_atom37(output["positions"][-1], output)
+ final_atom_mask = output["atom37_atom_exists"]
+ for i in range(output["aatype"].shape[0]):
+ aa = output["aatype"][i]
+ pred_pos = final_atom_positions[i]
+ mask = final_atom_mask[i]
+ resid = output["residue_index"][i] + 1
+ pred = OFProtein(
+ aatype=aa,
+ atom_positions=pred_pos,
+ atom_mask=mask,
+ residue_index=resid,
+ b_factors=output["plddt"][i],
+ )
+ pdbs.append(to_pdb(pred))
+ return pdbs
+
+ def infer_pdb(self, seqs, *args, **kwargs) -> str:
+ """Returns the pdb (file) string from the model given an input sequence."""
+ assert isinstance(seqs, str)
+ output = self.infer(seqs, *args, **kwargs)
+ return self.output_to_pdb(output)[0]
+
+ def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]:
+ """Returns the pdb (file) string from the model given an input sequence."""
+ output = self.infer(seqs, *args, **kwargs)
+ return self.output_to_pdb(output)
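+
+
+ # Example usage of the PDB helpers above (illustrative sketch, not executed by the library; assumes the
+ # "facebook/esmfold_v1" checkpoint referenced in the forward() docstring is available):
+ #
+ #     model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
+ #     pdb_string = model.infer_pdb("MLKNVQVQLV")  # tiny random peptide, as in the docstring example
+ #     with open("prediction.pdb", "w") as f:
+ #         f.write(pdb_string)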
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..2688c207b0adaca4ee79c37c8529694f608490b6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/modeling_tf_esm.py
@@ -0,0 +1,1567 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ESM model."""
+
+
+from __future__ import annotations
+
+import os
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
+from ...modeling_tf_outputs import (
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFBaseModelOutputWithPoolingAndCrossAttentions,
+ TFMaskedLMOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ shape_list,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, stable_softmax
+from ...utils import logging
+from .configuration_esm import EsmConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/esm2_t6_8M_UR50D"
+_CONFIG_FOR_DOC = "EsmConfig"
+
+
+def rotate_half(x):
+ x1, x2 = tf.split(x, 2, axis=-1)
+ return tf.concat((-x2, x1), axis=-1)
+
+
+def apply_rotary_pos_emb(x, cos, sin):
+ cos = cos[:, :, : tf.shape(x)[-2], :]
+ sin = sin[:, :, : tf.shape(x)[-2], :]
+
+ return (x * cos) + (rotate_half(x) * sin)
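+ # For each feature pair (x1, x2) (the two halves produced by rotate_half), this computes
+ # (x1 * cos - x2 * sin, x2 * cos + x1 * sin), i.e. a 2D rotation whose angle depends on the position,
+ # so dot products between rotated queries and keys depend only on their relative offset.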
+
+
+def symmetrize(x):
+ "Make layer symmetric in final two dimensions, used for contact prediction."
+ return x + tf.linalg.matrix_transpose(x) # Transposes last two dimensions only
+
+
+def average_product_correct(x):
+ "Perform average product correct, used for contact prediction."
+ a1 = tf.reduce_sum(x, -1, keepdims=True)
+ a2 = tf.reduce_sum(x, -2, keepdims=True)
+ a12 = tf.reduce_sum(x, (-1, -2), keepdims=True)
+
+ avg = a1 * a2
+ avg = avg / a12
+ normalized = x - avg
+ return normalized
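+ # In index notation over the final two dimensions, this is
+ # APC(x)_ij = x_ij - (sum_k x_ik) * (sum_k x_kj) / (sum_kl x_kl).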
+
+
+class TFRotaryEmbedding(keras.layers.Layer):
+ """
+ Rotary position embeddings based on those in
+ [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Queries and keys are transformed by rotation
+ matrices which depend on their relative positions.
+ """
+
+ def __init__(self, dim: int, name=None):
+ super().__init__(name=name)
+ # Matt: The PyTorch version of this layer does a lot of work to cache values, but we just rely on TF compilation
+ # and/or XLA to sort out constants like that. It actually may not seem like this layer needs to be stateful at
+ # all when we benefit from TF compilation, but it does. The reason is that self.inv_freq is a buffer in the
+ # original implementation, but all the shared ESM checkpoints were trained with fp16 params. This means that
+ # the inv_freq tensor was stored as a float16, and we need to replicate those lower-precision values or our
+ # models give different outputs from the original.
+ self.dim = dim
+
+ def build(self, input_shape):
+ super().build(input_shape)
+ self.inv_freq = self.add_weight(
+ "inv_freq", shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False
+ )
+ self.inv_freq.assign(
+ 1.0 / (10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim))
+ )
+
+ def _compute_cos_sin(self, x, seq_dimension=2):
+ seq_len = tf.shape(x)[seq_dimension]
+
+ t = tf.range(seq_len, dtype=self.inv_freq.dtype)
+ freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer product
+ emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :]
+
+ return tf.cos(emb), tf.sin(emb)
+
+ def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
+ cos_emb, sin_emb = self._compute_cos_sin(k, seq_dimension=-2)
+
+ return (
+ apply_rotary_pos_emb(q, cos_emb, sin_emb),
+ apply_rotary_pos_emb(k, cos_emb, sin_emb),
+ )
+
+
+class TFEsmContactPredictionHead(keras.layers.Layer):
+ """Performs symmetrization, apc, and computes a logistic regression on the output features"""
+
+ def __init__(
+ self,
+ in_features: int,
+ bias=True,
+ eos_idx: int = 2,
+ name=None,
+ ):
+ super().__init__(name=name)
+ self.eos_idx = eos_idx
+ self.in_features = in_features
+ self.regression = keras.layers.Dense(1, use_bias=bias, activation="sigmoid", name="regression")
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "regression", None) is not None:
+ with tf.name_scope(self.regression.name):
+ self.regression.build((None, self.in_features))
+
+ def call(self, tokens, attentions):
+ # remove eos token attentions
+ eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype)
+ eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2)
+ attentions = attentions * eos_mask[:, None, None, :, :]
+ attentions = attentions[..., :-1, :-1]
+ # remove cls token attentions
+ attentions = attentions[..., 1:, 1:]
+ batch_size, layers, heads, seqlen, _ = shape_list(attentions)
+ attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen))
+
+ # features: batch x channels x tokens x tokens (symmetric)
+ attentions = average_product_correct(symmetrize(attentions))
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
+ return tf.squeeze(self.regression(attentions), 3)
+
+
+class TFEsmEmbeddings(keras.layers.Layer):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.word_embeddings = keras.layers.Embedding(
+ config.vocab_size,
+ config.hidden_size,
+ embeddings_initializer=get_initializer(config.initializer_range),
+ name="word_embeddings",
+ )
+ self.position_embeddings = keras.layers.Embedding(
+ config.max_position_embeddings,
+ config.hidden_size,
+ embeddings_initializer=get_initializer(config.initializer_range),
+ name="position_embeddings",
+ )
+
+ if config.emb_layer_norm_before:
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ else:
+ self.layer_norm = None
+ # Matt: I think this line was copied incorrectly from BERT, disabling for now
+ # self.dropout = Dropout(config.hidden_dropout_prob)
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+
+ self.position_ids = tf.range(config.max_position_embeddings)[None, :]
+
+ self.padding_idx = config.pad_token_id
+ self.token_dropout = config.token_dropout
+ self.mask_token_id = config.mask_token_id
+ self.config = config
+
+ def call(
+ self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
+ # embedding_scale factor here.
+ embeddings = inputs_embeds
+
+ # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
+ # flag is False then it is handled in the same way as BERT/RoBERTa. If it is set to True, however,
+ # masked tokens are treated as if they were selected for input dropout and zeroed out.
+ # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
+ # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
+ # This is analogous to the way that dropout layers scale down outputs during evaluation when not
+ # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
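+ # For example, with mask_ratio_train = 0.15 * 0.8 = 0.12, a sample in which 20% of the tokens are
+ # masked has its embeddings scaled by (1 - 0.12) / (1 - 0.20) = 1.1.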
+ if self.token_dropout:
+ embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings)
+ mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
+ src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32)
+ masked_tokens = input_ids == self.mask_token_id
+ mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths
+ embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
+
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+
+ if self.layer_norm is not None:
+ embeddings = self.layer_norm(embeddings)
+ if attention_mask is not None:
+ embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype)
+ # Matt: I think this line was copied incorrectly from BERT, disabling it for now.
+ # embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: tf.Tensor
+
+ Returns: tf.Tensor
+ """
+ input_shape = shape_list(inputs_embeds)[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = tf.range(
+ start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64
+ )
+ return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "word_embeddings", None) is not None:
+ with tf.name_scope(self.word_embeddings.name):
+ self.word_embeddings.build(None)
+ if getattr(self, "position_embeddings", None) is not None:
+ with tf.name_scope(self.position_embeddings.name):
+ self.position_embeddings.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+
+
+class TFEsmSelfAttention(keras.layers.Layer):
+ def __init__(self, config, position_embedding_type=None, name=None):
+ super().__init__(name=name)
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ self.rotary_embeddings = None
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = keras.layers.Embedding(
+ 2 * config.max_position_embeddings - 1,
+ self.attention_head_size,
+ embeddings_initializer=get_initializer(config.initializer_range),
+ )
+ elif self.position_embedding_type == "rotary":
+ self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name="rotary_embeddings")
+
+ self.is_decoder = config.is_decoder
+ self.config = config
+
+ def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
+ new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size]
+ x = tf.reshape(x, new_x_shape)
+ return tf.transpose(x, perm=(0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ output_attentions: Optional[bool] = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim).
+ # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent,
+ # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original
+ # ESM code and fix rotary embeddings.
+ query_layer = query_layer * self.attention_head_size**-0.5
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ if self.position_embedding_type == "rotary":
+ query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ seq_length = shape_list(hidden_states)[1]
+ position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1)
+ position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0)
+ distance = position_ids_l - position_ids_r
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = tf.cast(positional_embedding, query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = tf.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = tf.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ if attention_mask is not None:
+ # Apply the attention mask is (precomputed for all layers in EsmModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = attention_probs @ value_layer
+
+ context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3))
+ new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size]
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+ if getattr(self, "rotary_embeddings", None) is not None:
+ with tf.name_scope(self.rotary_embeddings.name):
+ self.rotary_embeddings.build(None)
+
+
+class TFEsmSelfOutput(keras.layers.Layer):
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training=False):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states += input_tensor
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFEsmAttention(keras.layers.Layer):
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.self = TFEsmSelfAttention(config, name="self")
+ self.output_layer = TFEsmSelfOutput(config, name="output")
+ self.pruned_heads = set()
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ training=False,
+ ):
+ hidden_states_ln = self.LayerNorm(hidden_states)
+ self_outputs = self.self(
+ hidden_states_ln,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ training,
+ )
+ attention_output = self.output_layer(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self", None) is not None:
+ with tf.name_scope(self.self.name):
+ self.self.build(None)
+ if getattr(self, "output_layer", None) is not None:
+ with tf.name_scope(self.output_layer.name):
+ self.output_layer.build(None)
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFEsmIntermediate(keras.layers.Layer):
+ def __init__(self, config: EsmConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = tf.nn.gelu(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFEsmOutput(keras.layers.Layer):
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training=False):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states += input_tensor
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+
+
+class TFEsmLayer(keras.layers.Layer):
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = TFEsmAttention(config, name="attention")
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = TFEsmAttention(config)
+ self.intermediate = TFEsmIntermediate(config, name="intermediate")
+ self.output_layer = TFEsmOutput(config, name="output")
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ training=False,
+ ):
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ training=training,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise AttributeError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated"
+ " with cross-attention layers by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ training=training,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layernorm_output = self.LayerNorm(attention_output)
+ intermediate_output = self.intermediate(hidden_states=layernorm_output)
+ layer_output = self.output_layer(
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
+ )
+ outputs = (layer_output,) + outputs # add attentions if we output them
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "output_layer", None) is not None:
+ with tf.name_scope(self.output_layer.name):
+ self.output_layer.build(None)
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFEsmEncoder(keras.layers.Layer):
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.config = config
+ self.layer = [TFEsmLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+ self.emb_layer_norm_after = keras.layers.LayerNormalization(
+ epsilon=config.layer_norm_eps, name="emb_layer_norm_after"
+ )
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ training=False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ training,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if self.emb_layer_norm_after:
+ hidden_states = self.emb_layer_norm_after(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "emb_layer_norm_after", None) is not None:
+ with tf.name_scope(self.emb_layer_norm_after.name):
+ self.emb_layer_norm_after.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Esm
+class TFEsmPooler(keras.layers.Layer):
+ def __init__(self, config: EsmConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFEsmPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = EsmConfig
+ base_model_prefix = "esm"
+
+
+ESM_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+ heads, etc.)
+
+ This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a
+ regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior.
+
+ Parameters:
+ config ([`EsmConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ESM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
+ ESM_START_DOCSTRING,
+)
+class TFEsmMainLayer(keras.layers.Layer):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
+ `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+ def __init__(self, config, add_pooling_layer=True, name=None, **kwargs):
+ super().__init__(name=name, **kwargs)
+
+ self.config = config
+ self.is_decoder = config.is_decoder
+
+ self.embeddings = TFEsmEmbeddings(config, name="embeddings")
+ self.encoder = TFEsmEncoder(config, name="encoder")
+ self.pooler = TFEsmPooler(config, name="pooler") if add_pooling_layer else None
+
+ self.contact_head = TFEsmContactPredictionHead(
+ in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name="contact_head"
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "contact_head", None) is not None:
+ with tf.name_scope(self.contact_head.name):
+ self.contact_head.build(None)
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.word_embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
+ if not self.config.is_decoder:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+
+ if past_key_values is None:
+ past_key_values_length = 0
+ past_key_values = [None] * len(self.encoder.layer)
+ else:
+ past_key_values_length = shape_list(past_key_values[0][0])[-2]
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ training=training,
+ )
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # this attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ attention_mask_shape = shape_list(attention_mask)
+
+ mask_seq_length = seq_length + past_key_values_length
+ # Copied from `modeling_tf_t5.py`
+ # Provided a padding mask of dimensions [batch_size, mask_seq_length]
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length]
+ if self.is_decoder:
+ seq_ids = tf.range(mask_seq_length)
+ causal_mask = tf.less_equal(
+ tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)),
+ seq_ids[None, :, None],
+ )
+ causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype)
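+            # e.g. for mask_seq_length = 3, each batch entry gets the lower-triangular matrix
+            # [[1, 0, 0], [1, 1, 0], [1, 1, 1]], so position i can only attend to positions <= i.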
+ extended_attention_mask = causal_mask * attention_mask[:, None, :]
+ attention_mask_shape = shape_list(extended_attention_mask)
+ extended_attention_mask = tf.reshape(
+ extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])
+ )
+ if past_key_values[0] is not None:
+                # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]`
+ extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :]
+ else:
+ extended_attention_mask = tf.reshape(
+ attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])
+ )
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
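+        # e.g. a padding-mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0]; adding it to the raw attention
+        # scores effectively removes the masked positions before the softmax.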
+
+ # Copied from `modeling_tf_t5.py` with -1e9 -> -10000
+ if self.is_decoder and encoder_attention_mask is not None:
+            # If a 2D or 3D attention mask is provided for the cross-attention,
+            # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype)
+ num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask))
+ if num_dims_encoder_attention_mask == 3:
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
+ if num_dims_encoder_attention_mask == 2:
+ encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
+
+ # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
+ # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270
+ # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask,
+ # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2)))
+
+ encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+ def predict_contacts(self, tokens, attention_mask):
+ attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions
+ attns = tf.stack(attns, axis=1) # Matches the original model layout
+ # In the original model, attentions for padding tokens are completely zeroed out.
+ # This makes no difference most of the time because the other tokens won't attend to them,
+ # but it does for the contact prediction task, which takes attentions as input,
+ # so we have to mimic that here.
+ attention_mask = tf.cast(attention_mask, attns.dtype)
+ attns *= attention_mask[:, None, None, None]
+ attns *= attention_mask[:, None, None, :, None]
+ return self.contact_head(tokens, attns)
+
+
+@add_start_docstrings(
+ "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.",
+ ESM_START_DOCSTRING,
+)
+class TFEsmModel(TFEsmPreTrainedModel):
+ def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name="esm")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]:
+ r"""
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`). Set to `False` during training and `True` during generation.
+ """
+ outputs = self.esm(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def predict_contacts(self, tokens, attention_mask):
+ return self.esm.predict_contacts(tokens, attention_mask)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "esm", None) is not None:
+ with tf.name_scope(self.esm.name):
+ self.esm.build(None)
+
+
+@add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING)
+class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss):
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
+ self.lm_head = TFEsmLMHead(config, name="lm_head")
+ if config.tie_word_embeddings:
+ # Ensure word embeddings are built so that we actually have something to tie
+ with tf.name_scope(os.path.join(self._name_scope(), "esm", "embeddings", "word_embeddings")):
+ self.esm.embeddings.word_embeddings.build((None, None))
+ self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0]
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
+            Used to hide legacy arguments that have been deprecated.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def predict_contacts(self, tokens, attention_mask):
+ return self.esm.predict_contacts(tokens, attention_mask)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "esm", None) is not None:
+ with tf.name_scope(self.esm.name):
+ self.esm.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+class TFEsmLMHead(keras.layers.Layer):
+ """ESM Head for masked language modeling."""
+
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ if config.tie_word_embeddings:
+ self.decoder = None
+ else:
+ self.decoder = keras.layers.Dense(
+ config.vocab_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="decoder",
+ use_bias=False,
+ )
+ self.config = config
+
+ def build(self, input_shape=None):
+ # Separate bias to match the PT model and allow weight cross-loading to work
+ # Put it in the build so it gets the right name when adding it as a weight
+ if self.built:
+ return
+ self.built = True
+ self.bias = self.add_weight("bias", shape=(self.config.vocab_size,), initializer="zeros", trainable=True)
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "decoder", None) is not None and not self.config.tie_word_embeddings:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build([None, None, self.config.hidden_size])
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def call(self, features):
+ x = self.dense(features)
+ x = tf.nn.gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ if self.config.tie_word_embeddings:
+ x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias
+ else:
+ x = self.decoder(x) + self.bias
+ return x
+
+
+@add_start_docstrings(
+ """
+ ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ESM_START_DOCSTRING,
+)
+class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss):
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
+ self.classifier = TFEsmClassificationHead(config, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "esm", None) is not None:
+ with tf.name_scope(self.esm.name):
+ self.esm.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ESM_START_DOCSTRING,
+)
+class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss):
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name="esm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(config.num_labels, name="classifier")
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.esm(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "esm", None) is not None:
+ with tf.name_scope(self.esm.name):
+ self.esm.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+class TFEsmClassificationHead(keras.layers.Layer):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config, name=None):
+ super().__init__(name=name)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.out_proj = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="linear",
+ name="out_proj",
+ )
+ self.config = config
+
+ def call(self, features, training=False):
+        x = features[:, 0, :]  # take the <s> token (equivalent to [CLS])
+ x = self.dropout(x, training=training)
+ x = self.dense(x)
+ x = self.dropout(x, training=training)
+ x = self.out_proj(x)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.config.hidden_size])
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+    Args:
+        input_ids: tf.Tensor of token ids, with padding positions equal to `padding_idx`.
+        padding_idx: int, the id used for padding tokens.
+        past_key_values_length: int, length of any cached key/value states to offset the positions by.
+
+    Returns: tf.Tensor of position ids with the same shape as `input_ids`.
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = tf.cast(input_ids != padding_idx, tf.int64)
+ incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask
+ return incremental_indices + padding_idx
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..02a8c149ae320dd9b045edc5df31760a4eebefd9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__init__.py
@@ -0,0 +1,8 @@
+from .chunk_utils import chunk_layer
+from .data_transforms import make_atom14_masks
+from .feats import atom14_to_atom37, frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames
+from .loss import compute_predicted_aligned_error, compute_tm
+from .protein import Protein as OFProtein
+from .protein import to_pdb
+from .rigid_utils import Rigid, Rotation
+from .tensor_utils import dict_multimap, flatten_final_dims, permute_final_dims
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5ed7771b18feff3a3cdf5a4609f6afcd1fb302b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06c8294cafe9405134e2998df8dfd074ed236ab7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/chunk_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..139773568fffa8d9d889a6bb9235c62def508eb6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/data_transforms.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feeaf66533d0e9bc08b76d4405d3ad73902abaf5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/feats.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72da14ff8bf6ba667ca208822e4be1785cd81522
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/loss.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49c95f593eb575186f1607bc670558a0f734c5ed
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/protein.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5beb22a2a9305926b3b7e0aa2a1c090959027d0b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/residue_constants.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a4a99af54b094fd69239dba33b65d90879c05b2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/rigid_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..984fc9c533ad4ef885ad457a950c0ff178bbe6f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/__pycache__/tensor_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/chunk_utils.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/chunk_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..301721d135ee4d63ff111d45c06471c50c89e925
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/chunk_utils.py
@@ -0,0 +1,397 @@
+# Copyright 2021 AlQuraishi Laboratory
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+import math
+from functools import partial
+from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
+
+import torch
+
+from .tensor_utils import tensor_tree_map, tree_map
+
+
+def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
+ shapes = []
+ if isinstance(tree, dict):
+ for v in tree.values():
+ shapes.extend(_fetch_dims(v))
+ elif isinstance(tree, (list, tuple)):
+ for t in tree:
+ shapes.extend(_fetch_dims(t))
+ elif isinstance(tree, torch.Tensor):
+ shapes.append(tree.shape)
+ else:
+ raise ValueError("Not supported")
+
+ return shapes
+
+
+@torch.jit.ignore
+def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
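+    # e.g. flat_idx=5 with dims=(2, 3) unravels to (1, 2), matching numpy.unravel_index(5, (2, 3)).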
+ idx = []
+ for d in reversed(dims):
+ idx.append(flat_idx % d)
+ flat_idx = flat_idx // d
+
+ return tuple(reversed(idx))
+
+
+@torch.jit.ignore
+def _get_minimal_slice_set(
+ start: Sequence[int],
+ end: Sequence[int],
+ dims: Sequence[int],
+ start_edges: Optional[Sequence[bool]] = None,
+ end_edges: Optional[Sequence[bool]] = None,
+) -> List[Tuple[slice, ...]]:
+ """
+ Produces an ordered sequence of tensor slices that, when used in sequence on a tensor with shape dims, yields
+    tensors that contain every leaf in the contiguous range [start, end]. Care is taken to yield as short a sequence
+    of slices as possible (likely the shortest possible).
+
+ end is INCLUSIVE.
+ """
+
+ # start_edges and end_edges both indicate whether, starting from any given
+ # dimension, the start/end index is at the top/bottom edge of the
+ # corresponding tensor, modeled as a tree
+ def reduce_edge_list(l: List[bool]) -> None:
+ tally = True
+ for i in range(len(l)):
+ reversed_idx = -1 * (i + 1)
+ l[reversed_idx] &= tally
+ tally = l[reversed_idx]
+
+ if start_edges is None:
+ start_edges = [s == 0 for s in start]
+ reduce_edge_list(start_edges)
+ if end_edges is None:
+ end_edges = [e == (d - 1) for e, d in zip(end, dims)]
+ reduce_edge_list(end_edges)
+
+ # Base cases. Either start/end are empty and we're done, or the final,
+ # one-dimensional tensor can be simply sliced
+ if len(start) == 0:
+ return [()]
+ elif len(start) == 1:
+ return [(slice(start[0], end[0] + 1),)]
+
+ slices: List[Tuple[slice, ...]] = []
+ path_list: List[slice] = []
+
+ # Dimensions common to start and end can be selected directly
+ for s, e in zip(start, end):
+ if s == e:
+ path_list.append(slice(s, s + 1))
+ else:
+ break
+
+ path: Tuple[slice, ...] = tuple(path_list)
+ divergence_idx = len(path)
+
+ # start == end, and we're done
+ if divergence_idx == len(dims):
+ return [path]
+
+ def upper() -> Tuple[Tuple[slice, ...], ...]:
+ assert start_edges is not None
+ assert end_edges is not None
+
+ sdi = start[divergence_idx]
+ return tuple(
+ path + (slice(sdi, sdi + 1),) + s
+ for s in _get_minimal_slice_set(
+ start[divergence_idx + 1 :],
+ [d - 1 for d in dims[divergence_idx + 1 :]],
+ dims[divergence_idx + 1 :],
+ start_edges=start_edges[divergence_idx + 1 :],
+ end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
+ )
+ )
+
+ def lower() -> Tuple[Tuple[slice, ...], ...]:
+ assert start_edges is not None
+ assert end_edges is not None
+
+ edi = end[divergence_idx]
+ return tuple(
+ path + (slice(edi, edi + 1),) + s
+ for s in _get_minimal_slice_set(
+ [0 for _ in start[divergence_idx + 1 :]],
+ end[divergence_idx + 1 :],
+ dims[divergence_idx + 1 :],
+ start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
+ end_edges=end_edges[divergence_idx + 1 :],
+ )
+ )
+
+ # If both start and end are at the edges of the subtree rooted at
+ # divergence_idx, we can just select the whole subtree at once
+ if start_edges[divergence_idx] and end_edges[divergence_idx]:
+ slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
+ # If just start is at the edge, we can grab almost all of the subtree,
+ # treating only the ragged bottom edge as an edge case
+ elif start_edges[divergence_idx]:
+ slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
+ slices.extend(lower())
+ # Analogous to the previous case, but the top is ragged this time
+ elif end_edges[divergence_idx]:
+ slices.extend(upper())
+ slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
+ # If both sides of the range are ragged, we need to handle both sides
+ # separately. If there's contiguous meat in between them, we can index it
+ # in one big chunk
+ else:
+ slices.extend(upper())
+ middle_ground = end[divergence_idx] - start[divergence_idx]
+ if middle_ground > 1:
+ slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
+ slices.extend(lower())
+
+ return slices
+
+
+@torch.jit.ignore
+def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
+ """
+ Equivalent to
+
+ t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]
+
+ but without the need for the initial reshape call, which can be memory-intensive in certain situations. The only
+ reshape operations in this function are performed on sub-tensors that scale with (flat_end - flat_start), the chunk
+ size.
+ """
+
+ batch_dims = t.shape[:no_batch_dims]
+ start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
+ # _get_minimal_slice_set is inclusive
+ end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
+
+ # Get an ordered list of slices to perform
+ slices = _get_minimal_slice_set(
+ start_idx,
+ end_idx,
+ batch_dims,
+ )
+
+ sliced_tensors = [t[s] for s in slices]
+
+ return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
+
+
+def chunk_layer(
+ layer: Callable,
+ inputs: Dict[str, Any],
+ chunk_size: int,
+ no_batch_dims: int,
+ low_mem: bool = False,
+ _out: Any = None,
+ _add_into_out: bool = False,
+) -> Any:
+ """
+ Implements the "chunking" procedure described in section 1.11.8.
+
+ Layer outputs and inputs are assumed to be simple "pytrees," consisting only of (arbitrarily nested) lists, tuples,
+ and dicts with torch.Tensor leaves.
+
+ Args:
+ layer:
+ The layer to be applied chunk-wise
+ inputs:
+ A (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch
+ dimensions.
+ chunk_size:
+ The number of sub-batches per chunk. If multiple batch dimensions are specified, a "sub-batch" is defined
+ as a single indexing of all batch dimensions simultaneously (s.t. the number of sub-batches is the product
+ of the batch dimensions).
+ no_batch_dims:
+ How many of the initial dimensions of each input tensor can be considered batch dimensions.
+ low_mem:
+ Avoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly
+ slower than the default setting.
+ Returns:
+ The reassembled output of the layer on the inputs.
+ """
+ if not (len(inputs) > 0):
+ raise ValueError("Must provide at least one input")
+
+ initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
+ orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
+
+ def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
+ if not low_mem:
+ if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
+ t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
+ t = t.reshape(-1, *t.shape[no_batch_dims:])
+ else:
+ t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
+ return t
+
+ prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
+ prepped_outputs = None
+ if _out is not None:
+ prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
+
+ flat_batch_dim = 1
+ for d in orig_batch_dims:
+ flat_batch_dim *= d
+
+ no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
+
+ def _select_chunk(t: torch.Tensor) -> torch.Tensor:
+ return t[i : i + chunk_size] if t.shape[0] != 1 else t
+
+ i = 0
+ out = prepped_outputs
+ for _ in range(no_chunks):
+ # Chunk the input
+ if not low_mem:
+ select_chunk = _select_chunk
+ else:
+ select_chunk = partial(
+ _chunk_slice,
+ flat_start=i,
+ flat_end=min(flat_batch_dim, i + chunk_size),
+ no_batch_dims=len(orig_batch_dims),
+ )
+
+ chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
+
+ # Run the layer on the chunk
+ output_chunk = layer(**chunks)
+
+ # Allocate space for the output
+ if out is None:
+ out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
+
+ # Put the chunk in its pre-allocated space
+ if isinstance(output_chunk, dict):
+
+ def assign(d1: dict, d2: dict) -> None:
+ for k, v in d1.items():
+ if isinstance(v, dict):
+ assign(v, d2[k])
+ else:
+ if _add_into_out:
+ v[i : i + chunk_size] += d2[k]
+ else:
+ v[i : i + chunk_size] = d2[k]
+
+ assign(out, output_chunk)
+ elif isinstance(output_chunk, tuple):
+ for x1, x2 in zip(out, output_chunk):
+ if _add_into_out:
+ x1[i : i + chunk_size] += x2
+ else:
+ x1[i : i + chunk_size] = x2
+ elif isinstance(output_chunk, torch.Tensor):
+ if _add_into_out:
+ out[i : i + chunk_size] += output_chunk
+ else:
+ out[i : i + chunk_size] = output_chunk
+ else:
+ raise ValueError("Not supported")
+
+ i += chunk_size
+
+ out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
+
+ return out
+
+
+class ChunkSizeTuner:
+ def __init__(
+ self,
+ # Heuristically, runtimes for most of the modules in the network
+ # plateau earlier than this on all GPUs I've run the model on.
+ max_chunk_size: int = 512,
+ ):
+ self.max_chunk_size = max_chunk_size
+ self.cached_chunk_size: Optional[int] = None
+ self.cached_arg_data: Optional[tuple] = None
+
+ def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
+ logging.info("Tuning chunk size...")
+
+ if min_chunk_size >= self.max_chunk_size:
+ return min_chunk_size
+
+ candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
+ candidates = [c for c in candidates if c > min_chunk_size]
+ candidates = [min_chunk_size] + candidates
+ candidates[-1] += 4
+
+ def test_chunk_size(chunk_size: int) -> bool:
+ try:
+ with torch.no_grad():
+ fn(*args, chunk_size=chunk_size)
+ return True
+ except RuntimeError:
+ return False
+
+ min_viable_chunk_size_index = 0
+ i = len(candidates) - 1
+ while i > min_viable_chunk_size_index:
+ viable = test_chunk_size(candidates[i])
+ if not viable:
+ i = (min_viable_chunk_size_index + i) // 2
+ else:
+ min_viable_chunk_size_index = i
+ i = (i + len(candidates) - 1) // 2
+
+ return candidates[min_viable_chunk_size_index]
+
+ def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
+ consistent = True
+ for a1, a2 in zip(ac1, ac2):
+            # Compare the corresponding entries, not the containers themselves
+            assert type(a1) == type(a2)
+            if isinstance(a1, (list, tuple)):
+                consistent &= self._compare_arg_caches(a1, a2)
+            elif isinstance(a1, dict):
+ a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
+ a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
+ consistent &= self._compare_arg_caches(a1_items, a2_items)
+ else:
+ consistent &= a1 == a2
+
+ return consistent
+
+ def tune_chunk_size(
+ self,
+ representative_fn: Callable,
+ args: tuple,
+ min_chunk_size: int,
+ ) -> int:
+ consistent = True
+ arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
+ if self.cached_arg_data is not None:
+ # If args have changed shape/value, we need to re-tune
+ assert len(self.cached_arg_data) == len(arg_data)
+ consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
+ else:
+            # Nothing has been cached yet, so force a tuning pass
+ consistent = False
+
+ if not consistent:
+ self.cached_chunk_size = self._determine_favorable_chunk_size(
+ representative_fn,
+ args,
+ min_chunk_size,
+ )
+ self.cached_arg_data = arg_data
+
+ assert self.cached_chunk_size is not None
+
+ return self.cached_chunk_size
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/data_transforms.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/data_transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d4c17589ae66df2a8fd0ccfe8d6e335004eed9a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/data_transforms.py
@@ -0,0 +1,93 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict
+
+import numpy as np
+import torch
+
+from . import residue_constants as rc
+from .tensor_utils import tensor_tree_map, tree_map
+
+
+def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
+ """Construct denser atom positions (14 dimensions instead of 37)."""
+ restype_atom14_to_atom37_list = []
+ restype_atom37_to_atom14_list = []
+ restype_atom14_mask_list = []
+
+ for rt in rc.restypes:
+ atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
+ restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
+ atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
+ restype_atom37_to_atom14_list.append(
+ [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
+ )
+
+ restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
+
+ # Add dummy mapping for restype 'UNK'
+ restype_atom14_to_atom37_list.append([0] * 14)
+ restype_atom37_to_atom14_list.append([0] * 37)
+ restype_atom14_mask_list.append([0.0] * 14)
+
+ restype_atom14_to_atom37 = torch.tensor(
+ restype_atom14_to_atom37_list,
+ dtype=torch.int32,
+ device=protein["aatype"].device,
+ )
+ restype_atom37_to_atom14 = torch.tensor(
+ restype_atom37_to_atom14_list,
+ dtype=torch.int32,
+ device=protein["aatype"].device,
+ )
+ restype_atom14_mask = torch.tensor(
+ restype_atom14_mask_list,
+ dtype=torch.float32,
+ device=protein["aatype"].device,
+ )
+ protein_aatype = protein["aatype"].to(torch.long)
+
+ # create the mapping for (residx, atom14) --> atom37, i.e. an array
+ # with shape (num_res, 14) containing the atom37 indices for this protein
+ residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
+ residx_atom14_mask = restype_atom14_mask[protein_aatype]
+
+ protein["atom14_atom_exists"] = residx_atom14_mask
+ protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
+
+ # create the gather indices for mapping back
+ residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
+ protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
+
+ # create the corresponding mask
+ restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
+ for restype, restype_letter in enumerate(rc.restypes):
+ restype_name = rc.restype_1to3[restype_letter]
+ atom_names = rc.residue_atoms[restype_name]
+ for atom_name in atom_names:
+ atom_type = rc.atom_order[atom_name]
+ restype_atom37_mask[restype, atom_type] = 1
+
+ residx_atom37_mask = restype_atom37_mask[protein_aatype]
+ protein["atom37_atom_exists"] = residx_atom37_mask
+
+ return protein
+
+
+def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
+ batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
+ out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
+ return out
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py
new file mode 100644
index 0000000000000000000000000000000000000000..18b01a1fecaccfaafd93f8a269eff6ede752ccb1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/feats.py
@@ -0,0 +1,255 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Tuple, overload
+
+import torch
+import torch.types
+from torch import nn
+
+from . import residue_constants as rc
+from .rigid_utils import Rigid, Rotation
+from .tensor_utils import batched_gather
+
+
+@overload
+def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor:
+ ...
+
+
+@overload
+def pseudo_beta_fn(
+ aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ ...
+
+
+def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
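+    # Glycine has no C-beta atom, so its C-alpha position (and mask) is used as the pseudo-beta instead.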
+ is_gly = aatype == rc.restype_order["G"]
+ ca_idx = rc.atom_order["CA"]
+ cb_idx = rc.atom_order["CB"]
+ pseudo_beta = torch.where(
+ is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
+ all_atom_positions[..., ca_idx, :],
+ all_atom_positions[..., cb_idx, :],
+ )
+
+ if all_atom_masks is not None:
+ pseudo_beta_mask = torch.where(
+ is_gly,
+ all_atom_masks[..., ca_idx],
+ all_atom_masks[..., cb_idx],
+ )
+ return pseudo_beta, pseudo_beta_mask
+ else:
+ return pseudo_beta
+
+
+def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
+ atom37_data = batched_gather(
+ atom14,
+ batch["residx_atom37_to_atom14"],
+ dim=-2,
+ no_batch_dims=len(atom14.shape[:-2]),
+ )
+
+ atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
+
+ return atom37_data
+
+
+def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor:
+ template_aatype = template_feats["template_aatype"]
+ torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
+ alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"]
+ torsion_angles_mask = template_feats["template_torsion_angles_mask"]
+ template_angle_feat = torch.cat(
+ [
+ nn.functional.one_hot(template_aatype, 22),
+ torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14),
+ alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14),
+ torsion_angles_mask,
+ ],
+ dim=-1,
+ )
+
+ return template_angle_feat
+
+
+def build_template_pair_feat(
+ batch: Dict[str, torch.Tensor],
+ min_bin: torch.types.Number,
+ max_bin: torch.types.Number,
+ no_bins: int,
+ use_unit_vector: bool = False,
+ eps: float = 1e-20,
+ inf: float = 1e8,
+) -> torch.Tensor:
+ template_mask = batch["template_pseudo_beta_mask"]
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
+
+ # Compute distogram (this seems to differ slightly from Alg. 5)
+ tpb = batch["template_pseudo_beta"]
+ dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True)
+ lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
+ upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
+ dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
+
+ to_concat = [dgram, template_mask_2d[..., None]]
+
+ aatype_one_hot: torch.LongTensor = nn.functional.one_hot(
+ batch["template_aatype"],
+ rc.restype_num + 2,
+ )
+
+ n_res = batch["template_aatype"].shape[-1]
+ to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1))
+ to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1))
+
+ n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
+ rigids = Rigid.make_transform_from_reference(
+ n_xyz=batch["template_all_atom_positions"][..., n, :],
+ ca_xyz=batch["template_all_atom_positions"][..., ca, :],
+ c_xyz=batch["template_all_atom_positions"][..., c, :],
+ eps=eps,
+ )
+ points = rigids.get_trans()[..., None, :, :]
+ rigid_vec = rigids[..., None].invert_apply(points)
+
+ inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1))
+
+ t_aa_masks = batch["template_all_atom_mask"]
+ template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
+ template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
+
+ inv_distance_scalar = inv_distance_scalar * template_mask_2d
+ unit_vector = rigid_vec * inv_distance_scalar[..., None]
+
+ if not use_unit_vector:
+ unit_vector = unit_vector * 0.0
+
+ to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
+ to_concat.append(template_mask_2d[..., None])
+
+ act = torch.cat(to_concat, dim=-1)
+ act = act * template_mask_2d[..., None]
+
+ return act
+
+
+def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor:
+ msa_1hot: torch.LongTensor = nn.functional.one_hot(batch["extra_msa"], 23)
+ msa_feat = [
+ msa_1hot,
+ batch["extra_has_deletion"].unsqueeze(-1),
+ batch["extra_deletion_value"].unsqueeze(-1),
+ ]
+ return torch.cat(msa_feat, dim=-1)
+
+
+def torsion_angles_to_frames(
+ r: Rigid,
+ alpha: torch.Tensor,
+ aatype: torch.Tensor,
+ rrgdf: torch.Tensor,
+) -> Rigid:
+ # [*, N, 8, 4, 4]
+ default_4x4 = rrgdf[aatype, ...]
+
+ # [*, N, 8] transformations, i.e.
+ # One [*, N, 8, 3, 3] rotation matrix and
+ # One [*, N, 8, 3] translation matrix
+ default_r = r.from_tensor_4x4(default_4x4)
+
+ bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
+ bb_rot[..., 1] = 1
+
+ # [*, N, 8, 2]
+ alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2)
+
+ # [*, N, 8, 3, 3]
+ # Produces rotation matrices of the form:
+ # [
+ # [1, 0 , 0 ],
+ # [0, a_2,-a_1],
+ # [0, a_1, a_2]
+ # ]
+ # This follows the original code rather than the supplement, which uses
+ # different indices.
+
+ all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
+ all_rots[..., 0, 0] = 1
+ all_rots[..., 1, 1] = alpha[..., 1]
+ all_rots[..., 1, 2] = -alpha[..., 0]
+ all_rots[..., 2, 1:] = alpha
+
+ all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None))
+
+ chi2_frame_to_frame = all_frames[..., 5]
+ chi3_frame_to_frame = all_frames[..., 6]
+ chi4_frame_to_frame = all_frames[..., 7]
+
+ chi1_frame_to_bb = all_frames[..., 4]
+ chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)
+ chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)
+ chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)
+
+ all_frames_to_bb = Rigid.cat(
+ [
+ all_frames[..., :5],
+ chi2_frame_to_bb.unsqueeze(-1),
+ chi3_frame_to_bb.unsqueeze(-1),
+ chi4_frame_to_bb.unsqueeze(-1),
+ ],
+ dim=-1,
+ )
+
+ all_frames_to_global = r[..., None].compose(all_frames_to_bb)
+
+ return all_frames_to_global
+
+
+def frames_and_literature_positions_to_atom14_pos(
+ r: Rigid,
+ aatype: torch.Tensor,
+ default_frames: torch.Tensor,
+ group_idx: torch.Tensor,
+ atom_mask: torch.Tensor,
+ lit_positions: torch.Tensor,
+) -> torch.Tensor:
+ # [*, N, 14]
+ group_mask = group_idx[aatype, ...]
+
+ # [*, N, 14, 8]
+ group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
+ group_mask,
+ num_classes=default_frames.shape[-3],
+ )
+
+ # [*, N, 14, 8]
+ t_atoms_to_global = r[..., None, :] * group_mask_one_hot
+
+ # [*, N, 14]
+ t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1))
+
+ # [*, N, 14, 1]
+ atom_mask = atom_mask[aatype, ...].unsqueeze(-1)
+
+ # [*, N, 14, 3]
+ lit_positions = lit_positions[aatype, ...]
+ pred_positions = t_atoms_to_global.apply(lit_positions)
+ pred_positions = pred_positions * atom_mask
+
+ return pred_positions
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/loss.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c442786dc82ba2ebe243923509ed76a40de2a01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/loss.py
@@ -0,0 +1,105 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict, Optional, Tuple
+
+import torch
+
+
+def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor:
+ step = boundaries[1] - boundaries[0]
+ bin_centers = boundaries + step / 2
+ bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0)
+ return bin_centers
+
+
+def _calculate_expected_aligned_error(
+ alignment_confidence_breaks: torch.Tensor,
+ aligned_distance_error_probs: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
+ return (
+ torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
+ bin_centers[-1],
+ )
+
+
+def compute_predicted_aligned_error(
+ logits: torch.Tensor,
+ max_bin: int = 31,
+ no_bins: int = 64,
+ **kwargs,
+) -> Dict[str, torch.Tensor]:
+ """Computes aligned confidence metrics from logits.
+
+ Args:
+ logits: [*, num_res, num_res, num_bins] the logits output from
+ PredictedAlignedErrorHead.
+ max_bin: Maximum bin value
+ no_bins: Number of bins
+ Returns:
+ aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
+ aligned error probabilities over bins for each residue pair.
+ predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
+ error for each pair of residues.
+ max_predicted_aligned_error: [*] the maximum predicted error possible.
+ """
+ boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
+
+ aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
+ predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(
+ alignment_confidence_breaks=boundaries,
+ aligned_distance_error_probs=aligned_confidence_probs,
+ )
+
+ return {
+ "aligned_confidence_probs": aligned_confidence_probs,
+ "predicted_aligned_error": predicted_aligned_error,
+ "max_predicted_aligned_error": max_predicted_aligned_error,
+ }
+
+
+def compute_tm(
+ logits: torch.Tensor,
+ residue_weights: Optional[torch.Tensor] = None,
+ max_bin: int = 31,
+ no_bins: int = 64,
+ eps: float = 1e-8,
+ **kwargs,
+) -> torch.Tensor:
+ if residue_weights is None:
+ residue_weights = logits.new_ones(logits.shape[-2])
+
+ boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
+
+ bin_centers = _calculate_bin_centers(boundaries)
+ torch.sum(residue_weights)
+ n = logits.shape[-2]
+ clipped_n = max(n, 19)
+
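+ # TM-score normalization constant d0(N) = 1.24 * (N - 15)^(1/3) - 1.8, with N clipped below at 19 (Zhang & Skolnick, 2004).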
+ d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8
+
+ probs = torch.nn.functional.softmax(logits, dim=-1)
+
+ tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
+ predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)
+
+ normed_residue_mask = residue_weights / (eps + residue_weights.sum())
+ per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)
+
+ weighted = per_alignment * residue_weights
+
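+ # Select the alignment (anchor residue) with the highest weighted score and return its predicted TM.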
+ argmax = (weighted == torch.max(weighted)).nonzero()[0]
+ return per_alignment[tuple(argmax)]
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/protein.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/protein.py
new file mode 100644
index 0000000000000000000000000000000000000000..32e01571715c1b0c806e9cb764b2dec8aaab6068
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/protein.py
@@ -0,0 +1,329 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Protein data type."""
+import dataclasses
+import re
+import string
+from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
+
+import numpy as np
+
+from . import residue_constants
+
+
+FeatureDict = Mapping[str, np.ndarray]
+ModelOutput = Mapping[str, Any] # Is a nested dict.
+PICO_TO_ANGSTROM = 0.01
+
+
+@dataclasses.dataclass(frozen=True)
+class Protein:
+ """Protein structure representation."""
+
+ # Cartesian coordinates of atoms in angstroms. The atom types correspond to
+ # residue_constants.atom_types, i.e. the first three are N, CA, CB.
+ atom_positions: np.ndarray # [num_res, num_atom_type, 3]
+
+ # Amino-acid type for each residue represented as an integer between 0 and
+ # 20, where 20 is 'X'.
+ aatype: np.ndarray # [num_res]
+
+ # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
+ # is present and 0.0 if not. This should be used for loss masking.
+ atom_mask: np.ndarray # [num_res, num_atom_type]
+
+ # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
+ residue_index: np.ndarray # [num_res]
+
+ # B-factors, or temperature factors, of each residue (in sq. angstroms units),
+ # representing the displacement of the residue from its ground truth mean
+ # value.
+ b_factors: np.ndarray # [num_res, num_atom_type]
+
+ # Chain indices for multi-chain predictions
+ chain_index: Optional[np.ndarray] = None
+
+ # Optional remark about the protein. Included as a comment in output PDB
+ # files
+ remark: Optional[str] = None
+
+ # Templates used to generate this protein (prediction-only)
+ parents: Optional[Sequence[str]] = None
+
+ # Chain corresponding to each parent
+ parents_chain_index: Optional[Sequence[int]] = None
+
+
+def from_proteinnet_string(proteinnet_str: str) -> Protein:
+ tag_re = r"(\[[A-Z]+\]\n)"
+ tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
+ groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
+
+ atoms: List[str] = ["N", "CA", "C"]
+ aatype = None
+ atom_positions = None
+ atom_mask = None
+ for g in groups:
+ if "[PRIMARY]" == g[0]:
+ seq = g[1][0].strip()
+ # Strings are immutable, so rebuild the sequence with unknown residues mapped to "X".
+ seq = "".join(res if res in residue_constants.restypes else "X" for res in seq)
+ aatype = np.array(
+ [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
+ )
+ elif "[TERTIARY]" == g[0]:
+ tertiary: List[List[float]] = []
+ for axis in range(3):
+ tertiary.append(list(map(float, g[1][axis].split())))
+ tertiary_np = np.array(tertiary)
+ atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
+ for i, atom in enumerate(atoms):
+ atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
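+ # ProteinNet tertiary coordinates are given in picometers; convert them to angstroms.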
+ atom_positions *= PICO_TO_ANGSTROM
+ elif "[MASK]" == g[0]:
+ mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
+ atom_mask = np.zeros(
+ (
+ len(mask),
+ residue_constants.atom_type_num,
+ )
+ ).astype(np.float32)
+ for i, atom in enumerate(atoms):
+ atom_mask[:, residue_constants.atom_order[atom]] = 1
+ atom_mask *= mask[..., None]
+
+ assert aatype is not None
+
+ return Protein(
+ atom_positions=atom_positions,
+ atom_mask=atom_mask,
+ aatype=aatype,
+ residue_index=np.arange(len(aatype)),
+ b_factors=None,
+ )
+
+
+def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
+ pdb_headers: List[str] = []
+
+ remark = prot.remark
+ if remark is not None:
+ pdb_headers.append(f"REMARK {remark}")
+
+ parents = prot.parents
+ parents_chain_index = prot.parents_chain_index
+ if parents is not None and parents_chain_index is not None:
+ parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
+
+ if parents is None or len(parents) == 0:
+ parents = ["N/A"]
+
+ pdb_headers.append(f"PARENT {' '.join(parents)}")
+
+ return pdb_headers
+
+
+def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
+ """Add pdb headers to an existing PDB string. Useful during multi-chain
+ recycling
+ """
+ out_pdb_lines: List[str] = []
+ lines = pdb_str.split("\n")
+
+ remark = prot.remark
+ if remark is not None:
+ out_pdb_lines.append(f"REMARK {remark}")
+
+ parents_per_chain: List[List[str]]
+ if prot.parents is not None and len(prot.parents) > 0:
+ parents_per_chain = []
+ if prot.parents_chain_index is not None:
+ parent_dict: Dict[str, List[str]] = {}
+ for p, i in zip(prot.parents, prot.parents_chain_index):
+ parent_dict.setdefault(str(i), [])
+ parent_dict[str(i)].append(p)
+
+ max_idx = max([int(chain_idx) for chain_idx in parent_dict])
+ for i in range(max_idx + 1):
+ chain_parents = parent_dict.get(str(i), ["N/A"])
+ parents_per_chain.append(chain_parents)
+ else:
+ parents_per_chain.append(list(prot.parents))
+ else:
+ parents_per_chain = [["N/A"]]
+
+ def make_parent_line(p: Sequence[str]) -> str:
+ return f"PARENT {' '.join(p)}"
+
+ out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
+
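+ # A fresh PARENT line is emitted after every TER record so that each chain lists its own parents.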
+ chain_counter = 0
+ for i, l in enumerate(lines):
+ if "PARENT" not in l and "REMARK" not in l:
+ out_pdb_lines.append(l)
+ if "TER" in l and "END" not in lines[i + 1]:
+ chain_counter += 1
+ if chain_counter < len(parents_per_chain):
+ chain_parents = parents_per_chain[chain_counter]
+ else:
+ chain_parents = ["N/A"]
+
+ out_pdb_lines.append(make_parent_line(chain_parents))
+
+ return "\n".join(out_pdb_lines)
+
+
+def to_pdb(prot: Protein) -> str:
+ """Converts a `Protein` instance to a PDB string.
+
+ Args:
+ prot: The protein to convert to PDB.
+
+ Returns:
+ PDB string.
+ """
+ restypes = residue_constants.restypes + ["X"]
+
+ def res_1to3(r: int) -> str:
+ return residue_constants.restype_1to3.get(restypes[r], "UNK")
+
+ atom_types = residue_constants.atom_types
+
+ pdb_lines: List[str] = []
+
+ atom_mask = prot.atom_mask
+ aatype = prot.aatype
+ atom_positions = prot.atom_positions
+ residue_index = prot.residue_index.astype(np.int32)
+ b_factors = prot.b_factors
+ chain_index = prot.chain_index
+
+ if np.any(aatype > residue_constants.restype_num):
+ raise ValueError("Invalid aatypes.")
+
+ headers = get_pdb_headers(prot)
+ if len(headers) > 0:
+ pdb_lines.extend(headers)
+
+ n = aatype.shape[0]
+ atom_index = 1
+ prev_chain_index = 0
+ chain_tags = string.ascii_uppercase
+ chain_tag = None
+ # Add all atom sites.
+ for i in range(n):
+ res_name_3 = res_1to3(aatype[i])
+ for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
+ if mask < 0.5:
+ continue
+
+ record_type = "ATOM"
+ name = atom_name if len(atom_name) == 4 else f" {atom_name}"
+ alt_loc = ""
+ insertion_code = ""
+ occupancy = 1.00
+ element = atom_name[0] # Protein supports only C, N, O, S, so the first character is the element symbol.
+ charge = ""
+
+ chain_tag = "A"
+ if chain_index is not None:
+ chain_tag = chain_tags[chain_index[i]]
+
+ # PDB is a columnar format, every space matters here!
+ atom_line = (
+ f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
+ f"{res_name_3:>3} {chain_tag:>1}"
+ f"{residue_index[i]:>4}{insertion_code:>1} "
+ f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
+ f"{occupancy:>6.2f}{b_factor:>6.2f} "
+ f"{element:>2}{charge:>2}"
+ )
+ pdb_lines.append(atom_line)
+ atom_index += 1
+
+ should_terminate = i == n - 1
+ if chain_index is not None:
+ if i != n - 1 and chain_index[i + 1] != prev_chain_index:
+ should_terminate = True
+ prev_chain_index = chain_index[i + 1]
+
+ if should_terminate:
+ # Close the chain.
+ chain_end = "TER"
+ chain_termination_line = (
+ f"{chain_end:<6}{atom_index:>5} {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
+ )
+ pdb_lines.append(chain_termination_line)
+ atom_index += 1
+
+ if i != n - 1:
+ # "prev" is a misnomer here. This happens at the beginning of
+ # each new chain.
+ pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
+
+ pdb_lines.append("END")
+ pdb_lines.append("")
+ return "\n".join(pdb_lines)
+
+
+def ideal_atom_mask(prot: Protein) -> np.ndarray:
+ """Computes an ideal atom mask.
+
+ `Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function
+ computes a mask according to heavy atoms that should be present in the given sequence of amino acids.
+
+ Args:
+ prot: `Protein` whose fields are `numpy.ndarray` objects.
+
+ Returns:
+ An ideal atom mask.
+ """
+ return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
+
+
+def from_prediction(
+ features: FeatureDict,
+ result: ModelOutput,
+ b_factors: Optional[np.ndarray] = None,
+ chain_index: Optional[np.ndarray] = None,
+ remark: Optional[str] = None,
+ parents: Optional[Sequence[str]] = None,
+ parents_chain_index: Optional[Sequence[int]] = None,
+) -> Protein:
+ """Assembles a protein from a prediction.
+
+ Args:
+ features: Dictionary holding model inputs.
+ result: Dictionary holding model outputs.
+ b_factors: (Optional) B-factors to use for the protein.
+ chain_index: (Optional) Chain indices for multi-chain predictions
+ remark: (Optional) Remark about the prediction
+ parents: (Optional) List of template names
+ parents_chain_index: (Optional) Chain index of each parent template
+ Returns:
+ A protein instance.
+ """
+ return Protein(
+ aatype=features["aatype"],
+ atom_positions=result["final_atom_positions"],
+ atom_mask=result["final_atom_mask"],
+ residue_index=features["residue_index"] + 1,
+ b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
+ chain_index=chain_index,
+ remark=remark,
+ parents=parents,
+ parents_chain_index=parents_chain_index,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/residue_constants.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/residue_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f0ad3b50c65050a4ffd4370e9b4f3a3312fc723
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/residue_constants.py
@@ -0,0 +1,983 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Constants used in AlphaFold."""
+
+import collections
+import copy
+import functools
+from importlib import resources
+from typing import Dict, List, Mapping, Sequence, Tuple
+
+import numpy as np
+
+
+# Internal import (35fd).
+
+
+# Distance from one CA to next CA [trans configuration: omega = 180].
+ca_ca = 3.80209737096
+
+# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
+# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
+# chi angles so their chi angle lists are empty.
+chi_angles_atoms: Dict[str, List[List[str]]] = {
+ "ALA": [],
+ # Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
+ "ARG": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "NE"], ["CG", "CD", "NE", "CZ"]],
+ "ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
+ "ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
+ "CYS": [["N", "CA", "CB", "SG"]],
+ "GLN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
+ "GLU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
+ "GLY": [],
+ "HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]],
+ "ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]],
+ "LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
+ "LYS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "CE"], ["CG", "CD", "CE", "NZ"]],
+ "MET": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "SD"], ["CB", "CG", "SD", "CE"]],
+ "PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
+ "PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]],
+ "SER": [["N", "CA", "CB", "OG"]],
+ "THR": [["N", "CA", "CB", "OG1"]],
+ "TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
+ "TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
+ "VAL": [["N", "CA", "CB", "CG1"]],
+}
+
+# If chi angles given in fixed-length array, this matrix determines how to mask
+# them for each AA type. The order is as per restype_order (see below).
+chi_angles_mask: List[List[float]] = [
+ [0.0, 0.0, 0.0, 0.0], # ALA
+ [1.0, 1.0, 1.0, 1.0], # ARG
+ [1.0, 1.0, 0.0, 0.0], # ASN
+ [1.0, 1.0, 0.0, 0.0], # ASP
+ [1.0, 0.0, 0.0, 0.0], # CYS
+ [1.0, 1.0, 1.0, 0.0], # GLN
+ [1.0, 1.0, 1.0, 0.0], # GLU
+ [0.0, 0.0, 0.0, 0.0], # GLY
+ [1.0, 1.0, 0.0, 0.0], # HIS
+ [1.0, 1.0, 0.0, 0.0], # ILE
+ [1.0, 1.0, 0.0, 0.0], # LEU
+ [1.0, 1.0, 1.0, 1.0], # LYS
+ [1.0, 1.0, 1.0, 0.0], # MET
+ [1.0, 1.0, 0.0, 0.0], # PHE
+ [1.0, 1.0, 0.0, 0.0], # PRO
+ [1.0, 0.0, 0.0, 0.0], # SER
+ [1.0, 0.0, 0.0, 0.0], # THR
+ [1.0, 1.0, 0.0, 0.0], # TRP
+ [1.0, 1.0, 0.0, 0.0], # TYR
+ [1.0, 0.0, 0.0, 0.0], # VAL
+]
+
+# The following chi angles are pi periodic: they can be rotated by a multiple
+# of pi without affecting the structure.
+chi_pi_periodic: List[List[float]] = [
+ [0.0, 0.0, 0.0, 0.0], # ALA
+ [0.0, 0.0, 0.0, 0.0], # ARG
+ [0.0, 0.0, 0.0, 0.0], # ASN
+ [0.0, 1.0, 0.0, 0.0], # ASP
+ [0.0, 0.0, 0.0, 0.0], # CYS
+ [0.0, 0.0, 0.0, 0.0], # GLN
+ [0.0, 0.0, 1.0, 0.0], # GLU
+ [0.0, 0.0, 0.0, 0.0], # GLY
+ [0.0, 0.0, 0.0, 0.0], # HIS
+ [0.0, 0.0, 0.0, 0.0], # ILE
+ [0.0, 0.0, 0.0, 0.0], # LEU
+ [0.0, 0.0, 0.0, 0.0], # LYS
+ [0.0, 0.0, 0.0, 0.0], # MET
+ [0.0, 1.0, 0.0, 0.0], # PHE
+ [0.0, 0.0, 0.0, 0.0], # PRO
+ [0.0, 0.0, 0.0, 0.0], # SER
+ [0.0, 0.0, 0.0, 0.0], # THR
+ [0.0, 0.0, 0.0, 0.0], # TRP
+ [0.0, 1.0, 0.0, 0.0], # TYR
+ [0.0, 0.0, 0.0, 0.0], # VAL
+ [0.0, 0.0, 0.0, 0.0], # UNK
+]
+
+# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
+# psi and chi angles:
+# 0: 'backbone group',
+# 1: 'pre-omega-group', (empty)
+# 2: 'phi-group', (currently empty, because it defines only hydrogens)
+# 3: 'psi-group',
+# 4,5,6,7: 'chi1,2,3,4-group'
+# The atom positions are relative to the axis-end-atom of the corresponding
+# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
+# is defined such that the dihedral-angle-defining atom (the last entry in
+# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
+# format: [atomname, group_idx, rel_position]
+rigid_group_atom_positions: Dict[str, List[Tuple[str, int, Tuple[float, float, float]]]] = {
+ "ALA": [
+ ("N", 0, (-0.525, 1.363, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, -0.000, -0.000)),
+ ("CB", 0, (-0.529, -0.774, -1.205)),
+ ("O", 3, (0.627, 1.062, 0.000)),
+ ],
+ "ARG": [
+ ("N", 0, (-0.524, 1.362, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, -0.000, -0.000)),
+ ("CB", 0, (-0.524, -0.778, -1.209)),
+ ("O", 3, (0.626, 1.062, 0.000)),
+ ("CG", 4, (0.616, 1.390, -0.000)),
+ ("CD", 5, (0.564, 1.414, 0.000)),
+ ("NE", 6, (0.539, 1.357, -0.000)),
+ ("NH1", 7, (0.206, 2.301, 0.000)),
+ ("NH2", 7, (2.078, 0.978, -0.000)),
+ ("CZ", 7, (0.758, 1.093, -0.000)),
+ ],
+ "ASN": [
+ ("N", 0, (-0.536, 1.357, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, -0.000, -0.000)),
+ ("CB", 0, (-0.531, -0.787, -1.200)),
+ ("O", 3, (0.625, 1.062, 0.000)),
+ ("CG", 4, (0.584, 1.399, 0.000)),
+ ("ND2", 5, (0.593, -1.188, 0.001)),
+ ("OD1", 5, (0.633, 1.059, 0.000)),
+ ],
+ "ASP": [
+ ("N", 0, (-0.525, 1.362, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.527, 0.000, -0.000)),
+ ("CB", 0, (-0.526, -0.778, -1.208)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ("CG", 4, (0.593, 1.398, -0.000)),
+ ("OD1", 5, (0.610, 1.091, 0.000)),
+ ("OD2", 5, (0.592, -1.101, -0.003)),
+ ],
+ "CYS": [
+ ("N", 0, (-0.522, 1.362, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.524, 0.000, 0.000)),
+ ("CB", 0, (-0.519, -0.773, -1.212)),
+ ("O", 3, (0.625, 1.062, -0.000)),
+ ("SG", 4, (0.728, 1.653, 0.000)),
+ ],
+ "GLN": [
+ ("N", 0, (-0.526, 1.361, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, 0.000, 0.000)),
+ ("CB", 0, (-0.525, -0.779, -1.207)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ("CG", 4, (0.615, 1.393, 0.000)),
+ ("CD", 5, (0.587, 1.399, -0.000)),
+ ("NE2", 6, (0.593, -1.189, -0.001)),
+ ("OE1", 6, (0.634, 1.060, 0.000)),
+ ],
+ "GLU": [
+ ("N", 0, (-0.528, 1.361, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, -0.000, -0.000)),
+ ("CB", 0, (-0.526, -0.781, -1.207)),
+ ("O", 3, (0.626, 1.062, 0.000)),
+ ("CG", 4, (0.615, 1.392, 0.000)),
+ ("CD", 5, (0.600, 1.397, 0.000)),
+ ("OE1", 6, (0.607, 1.095, -0.000)),
+ ("OE2", 6, (0.589, -1.104, -0.001)),
+ ],
+ "GLY": [
+ ("N", 0, (-0.572, 1.337, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.517, -0.000, -0.000)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ],
+ "HIS": [
+ ("N", 0, (-0.527, 1.360, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, 0.000, 0.000)),
+ ("CB", 0, (-0.525, -0.778, -1.208)),
+ ("O", 3, (0.625, 1.063, 0.000)),
+ ("CG", 4, (0.600, 1.370, -0.000)),
+ ("CD2", 5, (0.889, -1.021, 0.003)),
+ ("ND1", 5, (0.744, 1.160, -0.000)),
+ ("CE1", 5, (2.030, 0.851, 0.002)),
+ ("NE2", 5, (2.145, -0.466, 0.004)),
+ ],
+ "ILE": [
+ ("N", 0, (-0.493, 1.373, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.527, -0.000, -0.000)),
+ ("CB", 0, (-0.536, -0.793, -1.213)),
+ ("O", 3, (0.627, 1.062, -0.000)),
+ ("CG1", 4, (0.534, 1.437, -0.000)),
+ ("CG2", 4, (0.540, -0.785, -1.199)),
+ ("CD1", 5, (0.619, 1.391, 0.000)),
+ ],
+ "LEU": [
+ ("N", 0, (-0.520, 1.363, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, -0.000, -0.000)),
+ ("CB", 0, (-0.522, -0.773, -1.214)),
+ ("O", 3, (0.625, 1.063, -0.000)),
+ ("CG", 4, (0.678, 1.371, 0.000)),
+ ("CD1", 5, (0.530, 1.430, -0.000)),
+ ("CD2", 5, (0.535, -0.774, 1.200)),
+ ],
+ "LYS": [
+ ("N", 0, (-0.526, 1.362, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, 0.000, 0.000)),
+ ("CB", 0, (-0.524, -0.778, -1.208)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ("CG", 4, (0.619, 1.390, 0.000)),
+ ("CD", 5, (0.559, 1.417, 0.000)),
+ ("CE", 6, (0.560, 1.416, 0.000)),
+ ("NZ", 7, (0.554, 1.387, 0.000)),
+ ],
+ "MET": [
+ ("N", 0, (-0.521, 1.364, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, 0.000, 0.000)),
+ ("CB", 0, (-0.523, -0.776, -1.210)),
+ ("O", 3, (0.625, 1.062, -0.000)),
+ ("CG", 4, (0.613, 1.391, -0.000)),
+ ("SD", 5, (0.703, 1.695, 0.000)),
+ ("CE", 6, (0.320, 1.786, -0.000)),
+ ],
+ "PHE": [
+ ("N", 0, (-0.518, 1.363, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.524, 0.000, -0.000)),
+ ("CB", 0, (-0.525, -0.776, -1.212)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ("CG", 4, (0.607, 1.377, 0.000)),
+ ("CD1", 5, (0.709, 1.195, -0.000)),
+ ("CD2", 5, (0.706, -1.196, 0.000)),
+ ("CE1", 5, (2.102, 1.198, -0.000)),
+ ("CE2", 5, (2.098, -1.201, -0.000)),
+ ("CZ", 5, (2.794, -0.003, -0.001)),
+ ],
+ "PRO": [
+ ("N", 0, (-0.566, 1.351, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.527, -0.000, 0.000)),
+ ("CB", 0, (-0.546, -0.611, -1.293)),
+ ("O", 3, (0.621, 1.066, 0.000)),
+ ("CG", 4, (0.382, 1.445, 0.0)),
+ # ('CD', 5, (0.427, 1.440, 0.0)),
+ ("CD", 5, (0.477, 1.424, 0.0)), # manually made angle 2 degrees larger
+ ],
+ "SER": [
+ ("N", 0, (-0.529, 1.360, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, -0.000, -0.000)),
+ ("CB", 0, (-0.518, -0.777, -1.211)),
+ ("O", 3, (0.626, 1.062, -0.000)),
+ ("OG", 4, (0.503, 1.325, 0.000)),
+ ],
+ "THR": [
+ ("N", 0, (-0.517, 1.364, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.526, 0.000, -0.000)),
+ ("CB", 0, (-0.516, -0.793, -1.215)),
+ ("O", 3, (0.626, 1.062, 0.000)),
+ ("CG2", 4, (0.550, -0.718, -1.228)),
+ ("OG1", 4, (0.472, 1.353, 0.000)),
+ ],
+ "TRP": [
+ ("N", 0, (-0.521, 1.363, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.525, -0.000, 0.000)),
+ ("CB", 0, (-0.523, -0.776, -1.212)),
+ ("O", 3, (0.627, 1.062, 0.000)),
+ ("CG", 4, (0.609, 1.370, -0.000)),
+ ("CD1", 5, (0.824, 1.091, 0.000)),
+ ("CD2", 5, (0.854, -1.148, -0.005)),
+ ("CE2", 5, (2.186, -0.678, -0.007)),
+ ("CE3", 5, (0.622, -2.530, -0.007)),
+ ("NE1", 5, (2.140, 0.690, -0.004)),
+ ("CH2", 5, (3.028, -2.890, -0.013)),
+ ("CZ2", 5, (3.283, -1.543, -0.011)),
+ ("CZ3", 5, (1.715, -3.389, -0.011)),
+ ],
+ "TYR": [
+ ("N", 0, (-0.522, 1.362, 0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.524, -0.000, -0.000)),
+ ("CB", 0, (-0.522, -0.776, -1.213)),
+ ("O", 3, (0.627, 1.062, -0.000)),
+ ("CG", 4, (0.607, 1.382, -0.000)),
+ ("CD1", 5, (0.716, 1.195, -0.000)),
+ ("CD2", 5, (0.713, -1.194, -0.001)),
+ ("CE1", 5, (2.107, 1.200, -0.002)),
+ ("CE2", 5, (2.104, -1.201, -0.003)),
+ ("OH", 5, (4.168, -0.002, -0.005)),
+ ("CZ", 5, (2.791, -0.001, -0.003)),
+ ],
+ "VAL": [
+ ("N", 0, (-0.494, 1.373, -0.000)),
+ ("CA", 0, (0.000, 0.000, 0.000)),
+ ("C", 0, (1.527, -0.000, -0.000)),
+ ("CB", 0, (-0.533, -0.795, -1.213)),
+ ("O", 3, (0.627, 1.062, -0.000)),
+ ("CG1", 4, (0.540, 1.429, -0.000)),
+ ("CG2", 4, (0.533, -0.776, 1.203)),
+ ],
+}
+
+# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
+residue_atoms: Dict[str, List[str]] = {
+ "ALA": ["C", "CA", "CB", "N", "O"],
+ "ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"],
+ "ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"],
+ "ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"],
+ "CYS": ["C", "CA", "CB", "N", "O", "SG"],
+ "GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"],
+ "GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"],
+ "GLY": ["C", "CA", "N", "O"],
+ "HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"],
+ "ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"],
+ "LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"],
+ "LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"],
+ "MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"],
+ "PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"],
+ "PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"],
+ "SER": ["C", "CA", "CB", "N", "O", "OG"],
+ "THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"],
+ "TRP": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE2", "CE3", "CZ2", "CZ3", "CH2", "N", "NE1", "O"],
+ "TYR": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O", "OH"],
+ "VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"],
+}
+
+# Naming swaps for ambiguous atom names.
+# Due to symmetries in the amino acids the naming of atoms is ambiguous in
+# 4 of the 20 amino acids.
+# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
+# in LEU, VAL and ARG can be resolved by using the 3d constellations of
+# the 'ambiguous' atoms and their neighbours)
+# TODO: ^ interpret this
+residue_atom_renaming_swaps: Dict[str, Dict[str, str]] = {
+ "ASP": {"OD1": "OD2"},
+ "GLU": {"OE1": "OE2"},
+ "PHE": {"CD1": "CD2", "CE1": "CE2"},
+ "TYR": {"CD1": "CD2", "CE1": "CE2"},
+}
+
+# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)
+van_der_waals_radius: Dict[str, float] = {
+ "C": 1.7,
+ "N": 1.55,
+ "O": 1.52,
+ "S": 1.8,
+}
+
+Bond = collections.namedtuple("Bond", ["atom1_name", "atom2_name", "length", "stddev"])
+BondAngle = collections.namedtuple(
+ "BondAngle",
+ ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"],
+)
+
+
+def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> list:
+ # Maps strings in a nested list structure to their corresponding index in atom_order
+ if first_call:
+ in_list = copy.deepcopy(in_list)
+ for i in range(len(in_list)):
+ if isinstance(in_list[i], list):
+ in_list[i] = map_structure_with_atom_order(in_list[i], first_call=False)
+ elif isinstance(in_list[i], str):
+ in_list[i] = atom_order[in_list[i]]
+ else:
+ raise ValueError("Unexpected type when mapping nested lists!")
+ return in_list
+
+
+@functools.lru_cache(maxsize=None)
+def load_stereo_chemical_props() -> (
+ Tuple[
+ Mapping[str, List[Bond]],
+ Mapping[str, List[Bond]],
+ Mapping[str, List[BondAngle]],
+ ]
+):
+ """Load stereo_chemical_props.txt into a nice structure.
+
+ Load literature values for bond lengths and bond angles and translate bond angles into the length of the opposite
+ edge of the triangle ("residue_virtual_bonds").
+
+ Returns:
+ residue_bonds: dict that maps resname --> list of Bond tuples
+ residue_virtual_bonds: dict that maps resname --> list of Bond tuples
+ residue_bond_angles: dict that maps resname --> list of BondAngle tuples
+ """
+ # TODO: this file should be downloaded in a setup script
+ stereo_chemical_props = resources.read_text("openfold.resources", "stereo_chemical_props.txt")
+
+ lines_iter = iter(stereo_chemical_props.splitlines())
+ # Load bond lengths.
+ residue_bonds: Dict[str, List[Bond]] = {}
+ next(lines_iter) # Skip header line.
+ for line in lines_iter:
+ if line.strip() == "-":
+ break
+ bond, resname, bond_length, stddev = line.split()
+ atom1, atom2 = bond.split("-")
+ if resname not in residue_bonds:
+ residue_bonds[resname] = []
+ residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev)))
+ residue_bonds["UNK"] = []
+
+ # Load bond angles.
+ residue_bond_angles: Dict[str, List[BondAngle]] = {}
+ next(lines_iter) # Skip empty line.
+ next(lines_iter) # Skip header line.
+ for line in lines_iter:
+ if line.strip() == "-":
+ break
+ bond, resname, angle_degree, stddev_degree = line.split()
+ atom1, atom2, atom3 = bond.split("-")
+ if resname not in residue_bond_angles:
+ residue_bond_angles[resname] = []
+ residue_bond_angles[resname].append(
+ BondAngle(
+ atom1,
+ atom2,
+ atom3,
+ float(angle_degree) / 180.0 * np.pi,
+ float(stddev_degree) / 180.0 * np.pi,
+ )
+ )
+ residue_bond_angles["UNK"] = []
+
+ def make_bond_key(atom1_name: str, atom2_name: str) -> str:
+ """Unique key to lookup bonds."""
+ return "-".join(sorted([atom1_name, atom2_name]))
+
+ # Translate bond angles into distances ("virtual bonds").
+ residue_virtual_bonds: Dict[str, List[Bond]] = {}
+ for resname, bond_angles in residue_bond_angles.items():
+ # Create a fast lookup dict for bond lengths.
+ bond_cache: Dict[str, Bond] = {}
+ for b in residue_bonds[resname]:
+ bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
+ residue_virtual_bonds[resname] = []
+ for ba in bond_angles:
+ bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
+ bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
+
+ # Compute distance between atom1 and atom3 using the law of cosines
+ # c^2 = a^2 + b^2 - 2ab*cos(gamma).
+ gamma = ba.angle_rad
+ length = np.sqrt(bond1.length**2 + bond2.length**2 - 2 * bond1.length * bond2.length * np.cos(gamma))
+
+ # Propagation of uncertainty assuming uncorrelated errors.
+ dl_outer = 0.5 / length
+ dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
+ dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
+ dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
+ stddev = np.sqrt(
+ (dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2
+ )
+ residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev))
+
+ return (residue_bonds, residue_virtual_bonds, residue_bond_angles)
+
+
+# Between-residue bond lengths for general bonds (first element) and for Proline
+# (second element).
+between_res_bond_length_c_n: Tuple[float, float] = (1.329, 1.341)
+between_res_bond_length_stddev_c_n: Tuple[float, float] = (0.014, 0.016)
+
+# Between-residue cos_angles.
+between_res_cos_angles_c_n_ca: Tuple[float, float] = (-0.5203, 0.0353) # degrees: 121.352 +- 2.315
+between_res_cos_angles_ca_c_n: Tuple[float, float] = (-0.4473, 0.0311) # degrees: 116.568 +- 1.995
+
+# This mapping is used when we need to store atom data in a format that requires
+# fixed atom data size for every residue (e.g. a numpy array).
+atom_types: List[str] = [
+ "N",
+ "CA",
+ "C",
+ "CB",
+ "O",
+ "CG",
+ "CG1",
+ "CG2",
+ "OG",
+ "OG1",
+ "SG",
+ "CD",
+ "CD1",
+ "CD2",
+ "ND1",
+ "ND2",
+ "OD1",
+ "OD2",
+ "SD",
+ "CE",
+ "CE1",
+ "CE2",
+ "CE3",
+ "NE",
+ "NE1",
+ "NE2",
+ "OE1",
+ "OE2",
+ "CH2",
+ "NH1",
+ "NH2",
+ "OH",
+ "CZ",
+ "CZ2",
+ "CZ3",
+ "NZ",
+ "OXT",
+]
+atom_order: Dict[str, int] = {atom_type: i for i, atom_type in enumerate(atom_types)}
+atom_type_num = len(atom_types) # := 37.
+
+# A compact atom encoding with 14 columns
+# pylint: disable=line-too-long
+# pylint: disable=bad-whitespace
+restype_name_to_atom14_names: Dict[str, List[str]] = {
+ "ALA": ["N", "CA", "C", "O", "CB", "", "", "", "", "", "", "", "", ""],
+ "ARG": ["N", "CA", "C", "O", "CB", "CG", "CD", "NE", "CZ", "NH1", "NH2", "", "", ""],
+ "ASN": ["N", "CA", "C", "O", "CB", "CG", "OD1", "ND2", "", "", "", "", "", ""],
+ "ASP": ["N", "CA", "C", "O", "CB", "CG", "OD1", "OD2", "", "", "", "", "", ""],
+ "CYS": ["N", "CA", "C", "O", "CB", "SG", "", "", "", "", "", "", "", ""],
+ "GLN": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "NE2", "", "", "", "", ""],
+ "GLU": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "OE2", "", "", "", "", ""],
+ "GLY": ["N", "CA", "C", "O", "", "", "", "", "", "", "", "", "", ""],
+ "HIS": ["N", "CA", "C", "O", "CB", "CG", "ND1", "CD2", "CE1", "NE2", "", "", "", ""],
+ "ILE": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "CD1", "", "", "", "", "", ""],
+ "LEU": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "", "", "", "", "", ""],
+ "LYS": ["N", "CA", "C", "O", "CB", "CG", "CD", "CE", "NZ", "", "", "", "", ""],
+ "MET": ["N", "CA", "C", "O", "CB", "CG", "SD", "CE", "", "", "", "", "", ""],
+ "PHE": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "", "", ""],
+ "PRO": ["N", "CA", "C", "O", "CB", "CG", "CD", "", "", "", "", "", "", ""],
+ "SER": ["N", "CA", "C", "O", "CB", "OG", "", "", "", "", "", "", "", ""],
+ "THR": ["N", "CA", "C", "O", "CB", "OG1", "CG2", "", "", "", "", "", "", ""],
+ "TRP": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "NE1", "CE2", "CE3", "CZ2", "CZ3", "CH2"],
+ "TYR": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "OH", "", ""],
+ "VAL": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "", "", "", "", "", "", ""],
+ "UNK": ["", "", "", "", "", "", "", "", "", "", "", "", "", ""],
+}
+# pylint: enable=line-too-long
+# pylint: enable=bad-whitespace
+
+
+# This is the standard residue order when coding AA type as a number.
+# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
+restypes: List[str] = [
+ "A",
+ "R",
+ "N",
+ "D",
+ "C",
+ "Q",
+ "E",
+ "G",
+ "H",
+ "I",
+ "L",
+ "K",
+ "M",
+ "F",
+ "P",
+ "S",
+ "T",
+ "W",
+ "Y",
+ "V",
+]
+restype_order: Dict[str, int] = {restype: i for i, restype in enumerate(restypes)}
+restype_num = len(restypes) # := 20.
+unk_restype_index = restype_num # Catch-all index for unknown restypes.
+
+restypes_with_x: List[str] = restypes + ["X"]
+restype_order_with_x: Dict[str, int] = {restype: i for i, restype in enumerate(restypes_with_x)}
+
+
+def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False) -> np.ndarray:
+ """Maps the given sequence into a one-hot encoded matrix.
+
+ Args:
+ sequence: An amino acid sequence.
+ mapping: A dictionary mapping amino acids to integers.
+ map_unknown_to_x: If True, any amino acid that is not in the mapping will be
+ mapped to the unknown amino acid 'X'. If the mapping doesn't contain amino acid 'X', an error will be thrown.
+ If False, any amino acid not in the mapping will throw an error.
+
+ Returns:
+ A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of the sequence.
+
+ Raises:
+ ValueError: If the mapping doesn't contain values from 0 to
+ num_unique_aas - 1 without any gaps.
+ """
+ num_entries = max(mapping.values()) + 1
+
+ if sorted(set(mapping.values())) != list(range(num_entries)):
+ raise ValueError(
+ "The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s"
+ % sorted(mapping.values())
+ )
+
+ one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)
+
+ for aa_index, aa_type in enumerate(sequence):
+ if map_unknown_to_x:
+ if aa_type.isalpha() and aa_type.isupper():
+ aa_id = mapping.get(aa_type, mapping["X"])
+ else:
+ raise ValueError(f"Invalid character in the sequence: {aa_type}")
+ else:
+ aa_id = mapping[aa_type]
+ one_hot_arr[aa_index, aa_id] = 1
+
+ return one_hot_arr
+
+
+restype_1to3: Dict[str, str] = {
+ "A": "ALA",
+ "R": "ARG",
+ "N": "ASN",
+ "D": "ASP",
+ "C": "CYS",
+ "Q": "GLN",
+ "E": "GLU",
+ "G": "GLY",
+ "H": "HIS",
+ "I": "ILE",
+ "L": "LEU",
+ "K": "LYS",
+ "M": "MET",
+ "F": "PHE",
+ "P": "PRO",
+ "S": "SER",
+ "T": "THR",
+ "W": "TRP",
+ "Y": "TYR",
+ "V": "VAL",
+}
+
+
+# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
+# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
+# many more, and less common, three letter names as keys and maps many of these
+# to the same one letter name (including 'X' and 'U' which we don't use here).
+restype_3to1: Dict[str, str] = {v: k for k, v in restype_1to3.items()}
+
+# Define a restype name for all unknown residues.
+unk_restype = "UNK"
+
+resnames: List[str] = [restype_1to3[r] for r in restypes] + [unk_restype]
+resname_to_idx: Dict[str, int] = {resname: i for i, resname in enumerate(resnames)}
+
+
+# The mapping here uses hhblits convention, so that B is mapped to D, J and O
+# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
+# remaining 20 amino acids are kept in alphabetical order.
+# There are 2 non-amino acid codes, X (representing any amino acid) and
+# "-" representing a missing amino acid in an alignment. The id for these
+# codes is put at the end (20 and 21) so that they can easily be ignored if
+# desired.
+HHBLITS_AA_TO_ID: Dict[str, int] = {
+ "A": 0,
+ "B": 2,
+ "C": 1,
+ "D": 2,
+ "E": 3,
+ "F": 4,
+ "G": 5,
+ "H": 6,
+ "I": 7,
+ "J": 20,
+ "K": 8,
+ "L": 9,
+ "M": 10,
+ "N": 11,
+ "O": 20,
+ "P": 12,
+ "Q": 13,
+ "R": 14,
+ "S": 15,
+ "T": 16,
+ "U": 1,
+ "V": 17,
+ "W": 18,
+ "X": 20,
+ "Y": 19,
+ "Z": 3,
+ "-": 21,
+}
+
+# Partial inversion of HHBLITS_AA_TO_ID.
+ID_TO_HHBLITS_AA: Dict[int, str] = {
+ 0: "A",
+ 1: "C", # Also U.
+ 2: "D", # Also B.
+ 3: "E", # Also Z.
+ 4: "F",
+ 5: "G",
+ 6: "H",
+ 7: "I",
+ 8: "K",
+ 9: "L",
+ 10: "M",
+ 11: "N",
+ 12: "P",
+ 13: "Q",
+ 14: "R",
+ 15: "S",
+ 16: "T",
+ 17: "V",
+ 18: "W",
+ 19: "Y",
+ 20: "X", # Includes J and O.
+ 21: "-",
+}
+
+restypes_with_x_and_gap: List[str] = restypes + ["X", "-"]
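+# Maps HHblits residue IDs (0-21) to the corresponding indices in restypes_with_x_and_gap.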
+MAP_HHBLITS_AATYPE_TO_OUR_AATYPE: Tuple[int, ...] = tuple(
+ restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap))
+)
+
+
+def _make_standard_atom_mask() -> np.ndarray:
+ """Returns [num_res_types, num_atom_types] mask array."""
+ # +1 to account for unknown (all 0s).
+ mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
+ for restype, restype_letter in enumerate(restypes):
+ restype_name = restype_1to3[restype_letter]
+ atom_names = residue_atoms[restype_name]
+ for atom_name in atom_names:
+ atom_type = atom_order[atom_name]
+ mask[restype, atom_type] = 1
+ return mask
+
+
+STANDARD_ATOM_MASK = _make_standard_atom_mask()
+
+
+# A one hot representation for the first and second atoms defining the axis
+# of rotation for each chi-angle in each residue.
+def chi_angle_atom(atom_index: int) -> np.ndarray:
+ """Define chi-angle rigid groups via one-hot representations."""
+ chi_angles_index = {}
+ one_hots = []
+
+ for k, v in chi_angles_atoms.items():
+ indices = [atom_types.index(s[atom_index]) for s in v]
+ indices.extend([-1] * (4 - len(indices)))
+ chi_angles_index[k] = indices
+
+ for r in restypes:
+ res3 = restype_1to3[r]
+ one_hot = np.eye(atom_type_num)[chi_angles_index[res3]]
+ one_hots.append(one_hot)
+
+ one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`.
+ one_hot = np.stack(one_hots, axis=0)
+ one_hot = np.transpose(one_hot, [0, 2, 1])
+
+ return one_hot
+
+
+chi_atom_1_one_hot = chi_angle_atom(1)
+chi_atom_2_one_hot = chi_angle_atom(2)
+
+# An array like chi_angles_atoms but using indices rather than names.
+chi_angles_atom_indices_list: List[List[List[str]]] = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
+chi_angles_atom_indices_ours: list = map_structure_with_atom_order(chi_angles_atom_indices_list)
+chi_angles_atom_indices = np.array(
+ [chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms))) for chi_atoms in chi_angles_atom_indices_ours]
+)
+
+# Mapping from (res_name, atom_name) pairs to the atom's chi group index
+# and atom index within that group.
+chi_groups_for_atom: Dict[Tuple[str, str], List[Tuple[int, int]]] = collections.defaultdict(list)
+for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
+ for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
+ for atom_i, atom in enumerate(chi_group):
+ chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
+chi_groups_for_atom = dict(chi_groups_for_atom)
+
+
+def _make_rigid_transformation_4x4(ex: np.ndarray, ey: np.ndarray, translation: np.ndarray) -> np.ndarray:
+ """Create a rigid 4x4 transformation matrix from two axes and transl."""
+ # Normalize ex.
+ ex_normalized = ex / np.linalg.norm(ex)
+
+ # make ey perpendicular to ex
+ ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
+ ey_normalized /= np.linalg.norm(ey_normalized)
+
+ # compute ez as cross product
+ eznorm = np.cross(ex_normalized, ey_normalized)
+ m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
+ m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)
+ return m
+
+
+# create an array with (restype, atomtype) --> rigid_group_idx
+# and an array with (restype, atomtype, coord) for the atom positions
+# and compute affine transformation matrices (4,4) from one rigid group to the
+# previous group
+restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
+restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
+restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
+restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
+restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
+restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
+restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
+
+
+def _make_rigid_group_constants() -> None:
+ """Fill the arrays above."""
+ for restype, restype_letter in enumerate(restypes):
+ resname = restype_1to3[restype_letter]
+ for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]:
+ atomtype = atom_order[atomname]
+ restype_atom37_to_rigid_group[restype, atomtype] = group_idx
+ restype_atom37_mask[restype, atomtype] = 1
+ restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position
+
+ atom14idx = restype_name_to_atom14_names[resname].index(atomname)
+ restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
+ restype_atom14_mask[restype, atom14idx] = 1
+ restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position
+
+ for restype, restype_letter in enumerate(restypes):
+ resname = restype_1to3[restype_letter]
+ atom_positions: Dict[str, np.ndarray] = {
+ name: np.array(pos) for name, _, pos in rigid_group_atom_positions[resname]
+ }
+
+ # backbone to backbone is the identity transform
+ restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
+
+ # pre-omega-frame to backbone (currently dummy identity matrix)
+ restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)
+
+ # phi-frame to backbone
+ mat = _make_rigid_transformation_4x4(
+ ex=atom_positions["N"] - atom_positions["CA"],
+ ey=np.array([1.0, 0.0, 0.0]),
+ translation=atom_positions["N"],
+ )
+ restype_rigid_group_default_frame[restype, 2, :, :] = mat
+
+ # psi-frame to backbone
+ mat = _make_rigid_transformation_4x4(
+ ex=atom_positions["C"] - atom_positions["CA"],
+ ey=atom_positions["CA"] - atom_positions["N"],
+ translation=atom_positions["C"],
+ )
+ restype_rigid_group_default_frame[restype, 3, :, :] = mat
+
+ # chi1-frame to backbone
+ if chi_angles_mask[restype][0]:
+ base_atom_names = chi_angles_atoms[resname][0]
+ base_atom_positions = [atom_positions[name] for name in base_atom_names]
+ mat = _make_rigid_transformation_4x4(
+ ex=base_atom_positions[2] - base_atom_positions[1],
+ ey=base_atom_positions[0] - base_atom_positions[1],
+ translation=base_atom_positions[2],
+ )
+ restype_rigid_group_default_frame[restype, 4, :, :] = mat
+
+ # chi2-frame to chi1-frame
+ # chi3-frame to chi2-frame
+ # chi4-frame to chi3-frame
+ # luckily all rotation axes for the next frame start at (0,0,0) of the
+ # previous frame
+ for chi_idx in range(1, 4):
+ if chi_angles_mask[restype][chi_idx]:
+ axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]
+ axis_end_atom_position = atom_positions[axis_end_atom_name]
+ mat = _make_rigid_transformation_4x4(
+ ex=axis_end_atom_position,
+ ey=np.array([-1.0, 0.0, 0.0]),
+ translation=axis_end_atom_position,
+ )
+ restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat
+
+
+_make_rigid_group_constants()
+
+
+def make_atom14_dists_bounds(
+ overlap_tolerance: float = 1.5,
+ bond_length_tolerance_factor: int = 15,
+) -> Dict[str, np.ndarray]:
+ """compute upper and lower bounds for bonds to assess violations."""
+ restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
+ restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
+ restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
+ residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
+ for restype, restype_letter in enumerate(restypes):
+ resname = restype_1to3[restype_letter]
+ atom_list = restype_name_to_atom14_names[resname]
+
+ # create lower and upper bounds for clashes
+ for atom1_idx, atom1_name in enumerate(atom_list):
+ if not atom1_name:
+ continue
+ atom1_radius = van_der_waals_radius[atom1_name[0]]
+ for atom2_idx, atom2_name in enumerate(atom_list):
+ if (not atom2_name) or atom1_idx == atom2_idx:
+ continue
+ atom2_radius = van_der_waals_radius[atom2_name[0]]
+ lower = atom1_radius + atom2_radius - overlap_tolerance
+ upper = 1e10
+ restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
+ restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
+ restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
+ restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
+
+ # overwrite lower and upper bounds for bonds and angles
+ for b in residue_bonds[resname] + residue_virtual_bonds[resname]:
+ atom1_idx = atom_list.index(b.atom1_name)
+ atom2_idx = atom_list.index(b.atom2_name)
+ lower = b.length - bond_length_tolerance_factor * b.stddev
+ upper = b.length + bond_length_tolerance_factor * b.stddev
+ restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
+ restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
+ restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
+ restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
+ restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev
+ restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev
+ return {
+ "lower_bound": restype_atom14_bond_lower_bound, # shape (21,14,14)
+ "upper_bound": restype_atom14_bond_upper_bound, # shape (21,14,14)
+ "stddev": restype_atom14_bond_stddev, # shape (21,14,14)
+ }
+
+
+restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32)
+restype_atom14_ambiguous_atoms_swap_idx: np.ndarray = np.tile(np.arange(14, dtype=int), (21, 1))
+
+
+def _make_atom14_ambiguity_feats() -> None:
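+ # Mark the symmetry-ambiguous atoms of ASP, GLU, PHE and TYR and record the atom14 index of each swap partner.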
+ for res, pairs in residue_atom_renaming_swaps.items():
+ res_idx = restype_order[restype_3to1[res]]
+ for atom1, atom2 in pairs.items():
+ atom1_idx = restype_name_to_atom14_names[res].index(atom1)
+ atom2_idx = restype_name_to_atom14_names[res].index(atom2)
+ restype_atom14_ambiguous_atoms[res_idx, atom1_idx] = 1
+ restype_atom14_ambiguous_atoms[res_idx, atom2_idx] = 1
+ restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom1_idx] = atom2_idx
+ restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom2_idx] = atom1_idx
+
+
+_make_atom14_ambiguity_feats()
+
+
+def aatype_to_str_sequence(aatype: Sequence[int]) -> str:
+ return "".join([restypes_with_x[aatype[i]] for i in range(len(aatype))])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/rigid_utils.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/rigid_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc2fe5f5c4ebff888e2d66eae3647073be89b4f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/rigid_utils.py
@@ -0,0 +1,1242 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from functools import lru_cache
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
+
+import numpy as np
+import torch
+
+
+def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
+ """
+ Performs matrix multiplication of two rotation matrix tensors. Written out by hand to avoid AMP downcasting.
+
+ Args:
+ a: [*, 3, 3] left multiplicand
+ b: [*, 3, 3] right multiplicand
+ Returns:
+ The product ab
+ """
+
+ def row_mul(i: int) -> torch.Tensor:
+ return torch.stack(
+ [
+ a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0],
+ a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1],
+ a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2],
+ ],
+ dim=-1,
+ )
+
+ return torch.stack(
+ [
+ row_mul(0),
+ row_mul(1),
+ row_mul(2),
+ ],
+ dim=-2,
+ )
+
+
+def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
+ """
+ Applies a rotation to a vector. Written out by hand to avoid AMP downcasting.
+
+ Args:
+ r: [*, 3, 3] rotation matrices
+ t: [*, 3] coordinate tensors
+ Returns:
+ [*, 3] rotated coordinates
+ """
+ x, y, z = torch.unbind(t, dim=-1)
+ return torch.stack(
+ [
+ r[..., 0, 0] * x + r[..., 0, 1] * y + r[..., 0, 2] * z,
+ r[..., 1, 0] * x + r[..., 1, 1] * y + r[..., 1, 2] * z,
+ r[..., 2, 0] * x + r[..., 2, 1] * y + r[..., 2, 2] * z,
+ ],
+ dim=-1,
+ )
+
+
+@lru_cache(maxsize=None)
+def identity_rot_mats(
+ batch_dims: Tuple[int, ...],
+ dtype: Optional[torch.dtype] = None,
+ device: Optional[torch.device] = None,
+ requires_grad: bool = True,
+) -> torch.Tensor:
+ rots = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad)
+ rots = rots.view(*((1,) * len(batch_dims)), 3, 3)
+ rots = rots.expand(*batch_dims, -1, -1)
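+ # expand() only creates a broadcast view; contiguous() materializes an independent tensor for callers.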
+ rots = rots.contiguous()
+
+ return rots
+
+
+@lru_cache(maxsize=None)
+def identity_trans(
+ batch_dims: Tuple[int, ...],
+ dtype: Optional[torch.dtype] = None,
+ device: Optional[torch.device] = None,
+ requires_grad: bool = True,
+) -> torch.Tensor:
+ trans = torch.zeros((*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad)
+ return trans
+
+
+@lru_cache(maxsize=None)
+def identity_quats(
+ batch_dims: Tuple[int, ...],
+ dtype: Optional[torch.dtype] = None,
+ device: Optional[torch.device] = None,
+ requires_grad: bool = True,
+) -> torch.Tensor:
+ quat = torch.zeros((*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad)
+
+ with torch.no_grad():
+ quat[..., 0] = 1
+
+ return quat
+
+
+_quat_elements: List[str] = ["a", "b", "c", "d"]
+_qtr_keys: List[str] = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]
+_qtr_ind_dict: Dict[str, int] = {key: ind for ind, key in enumerate(_qtr_keys)}
+
+
+def _to_mat(pairs: List[Tuple[str, int]]) -> np.ndarray:
+ mat = np.zeros((4, 4))
+ for key, value in pairs:
+ ind = _qtr_ind_dict[key]
+ mat[ind // 4][ind % 4] = value
+
+ return mat
+
+
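+# _QTR_MAT[..., i, j] holds the 4x4 coefficients that contract with the quaternion outer product to give entry (i, j) of the rotation matrix.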
+_QTR_MAT = np.zeros((4, 4, 3, 3))
+_QTR_MAT[..., 0, 0] = _to_mat([("aa", 1), ("bb", 1), ("cc", -1), ("dd", -1)])
+_QTR_MAT[..., 0, 1] = _to_mat([("bc", 2), ("ad", -2)])
+_QTR_MAT[..., 0, 2] = _to_mat([("bd", 2), ("ac", 2)])
+_QTR_MAT[..., 1, 0] = _to_mat([("bc", 2), ("ad", 2)])
+_QTR_MAT[..., 1, 1] = _to_mat([("aa", 1), ("bb", -1), ("cc", 1), ("dd", -1)])
+_QTR_MAT[..., 1, 2] = _to_mat([("cd", 2), ("ab", -2)])
+_QTR_MAT[..., 2, 0] = _to_mat([("bd", 2), ("ac", -2)])
+_QTR_MAT[..., 2, 1] = _to_mat([("cd", 2), ("ab", 2)])
+_QTR_MAT[..., 2, 2] = _to_mat([("aa", 1), ("bb", -1), ("cc", -1), ("dd", 1)])
+
+
+def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
+ """
+ Converts a quaternion to a rotation matrix.
+
+ Args:
+ quat: [*, 4] quaternions
+ Returns:
+ [*, 3, 3] rotation matrices
+ """
+ # [*, 4, 4]
+ quat = quat[..., None] * quat[..., None, :]
+
+ # [4, 4, 3, 3]
+ mat = _get_quat("_QTR_MAT", dtype=quat.dtype, device=quat.device)
+
+ # [*, 4, 4, 3, 3]
+ shaped_qtr_mat = mat.view((1,) * len(quat.shape[:-2]) + mat.shape)
+ quat = quat[..., None, None] * shaped_qtr_mat
+
+ # [*, 3, 3]
+ return torch.sum(quat, dim=(-3, -4))
+
+
+def rot_to_quat(rot: torch.Tensor) -> torch.Tensor:
+ if rot.shape[-2:] != (3, 3):
+ raise ValueError("Input rotation is incorrectly shaped")
+
+ [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = [[rot[..., i, j] for j in range(3)] for i in range(3)]
+
+ k = [
+ [
+ xx + yy + zz,
+ zy - yz,
+ xz - zx,
+ yx - xy,
+ ],
+ [
+ zy - yz,
+ xx - yy - zz,
+ xy + yx,
+ xz + zx,
+ ],
+ [
+ xz - zx,
+ xy + yx,
+ yy - xx - zz,
+ yz + zy,
+ ],
+ [
+ yx - xy,
+ xz + zx,
+ yz + zy,
+ zz - xx - yy,
+ ],
+ ]
+
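+ # The quaternion is the eigenvector of K with the largest eigenvalue; eigh returns eigenvalues in ascending order, hence [..., -1].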
+ _, vectors = torch.linalg.eigh((1.0 / 3.0) * torch.stack([torch.stack(t, dim=-1) for t in k], dim=-2))
+ return vectors[..., -1]
+
+
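+# _QUAT_MULTIPLY[i, j, k] is the coefficient of quat1[i] * quat2[j] in component k of the Hamilton product.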
+_QUAT_MULTIPLY = np.zeros((4, 4, 4))
+_QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]
+
+_QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]
+
+_QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]]
+
+_QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]]
+
+_QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]
+
+_CACHED_QUATS: Dict[str, np.ndarray] = {
+ "_QTR_MAT": _QTR_MAT,
+ "_QUAT_MULTIPLY": _QUAT_MULTIPLY,
+ "_QUAT_MULTIPLY_BY_VEC": _QUAT_MULTIPLY_BY_VEC,
+}
+
+
+@lru_cache(maxsize=None)
+def _get_quat(quat_key: str, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
+ return torch.tensor(_CACHED_QUATS[quat_key], dtype=dtype, device=device)
+
+
+def quat_multiply(quat1: torch.Tensor, quat2: torch.Tensor) -> torch.Tensor:
+ """Multiply a quaternion by another quaternion."""
+ mat = _get_quat("_QUAT_MULTIPLY", dtype=quat1.dtype, device=quat1.device)
+ reshaped_mat = mat.view((1,) * len(quat1.shape[:-1]) + mat.shape)
+ return torch.sum(reshaped_mat * quat1[..., :, None, None] * quat2[..., None, :, None], dim=(-3, -2))
+
+
+def quat_multiply_by_vec(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
+ """Multiply a quaternion by a pure-vector quaternion."""
+ mat = _get_quat("_QUAT_MULTIPLY_BY_VEC", dtype=quat.dtype, device=quat.device)
+ reshaped_mat = mat.view((1,) * len(quat.shape[:-1]) + mat.shape)
+ return torch.sum(reshaped_mat * quat[..., :, None, None] * vec[..., None, :, None], dim=(-3, -2))
+
+
+def invert_rot_mat(rot_mat: torch.Tensor) -> torch.Tensor:
+ return rot_mat.transpose(-1, -2)
+
+
+def invert_quat(quat: torch.Tensor) -> torch.Tensor:
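+ # Quaternion inverse: conjugate divided by the squared norm.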
+ quat_prime = quat.clone()
+ quat_prime[..., 1:] *= -1
+ inv = quat_prime / torch.sum(quat**2, dim=-1, keepdim=True)
+ return inv
+
+
+class Rotation:
+ """
+ A 3D rotation. Depending on how the object is initialized, the rotation is represented by either a rotation matrix
+ or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the
+ underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the
+ behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.
+ """
+
+ def __init__(
+ self,
+ rot_mats: Optional[torch.Tensor] = None,
+ quats: Optional[torch.Tensor] = None,
+ normalize_quats: bool = True,
+ ):
+ """
+ Args:
+ rot_mats:
+ A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats
+ quats:
+ A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit
+ quaternion
+ normalize_quats:
+ If quats is specified, whether to normalize quats
+ """
+ if (rot_mats is None and quats is None) or (rot_mats is not None and quats is not None):
+ raise ValueError("Exactly one input argument must be specified")
+
+ if (rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or (quats is not None and quats.shape[-1] != 4):
+ raise ValueError("Incorrectly shaped rotation matrix or quaternion")
+
+ # Force full-precision
+ if quats is not None:
+ quats = quats.to(dtype=torch.float32)
+ if rot_mats is not None:
+ rot_mats = rot_mats.to(dtype=torch.float32)
+
+ if quats is not None and normalize_quats:
+ quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
+
+ self._rot_mats = rot_mats
+ self._quats = quats
+
+ @staticmethod
+ def identity(
+ shape,
+ dtype: Optional[torch.dtype] = None,
+ device: Optional[torch.device] = None,
+ requires_grad: bool = True,
+ fmt: str = "quat",
+ ) -> Rotation:
+ """
+ Returns an identity Rotation.
+
+ Args:
+ shape:
+ The "shape" of the resulting Rotation object. See documentation for the shape property
+ dtype:
+ The torch dtype for the rotation
+ device:
+ The torch device for the new rotation
+ requires_grad:
+ Whether the underlying tensors in the new rotation object should require gradient computation
+ fmt:
+ One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
+ Returns:
+ A new identity rotation
+ """
+ if fmt == "rot_mat":
+ rot_mats = identity_rot_mats(
+ shape,
+ dtype,
+ device,
+ requires_grad,
+ )
+ return Rotation(rot_mats=rot_mats, quats=None)
+ elif fmt == "quat":
+ quats = identity_quats(shape, dtype, device, requires_grad)
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
+ else:
+ raise ValueError(f"Invalid format: f{fmt}")
+
+ # Magic methods
+
+ def __getitem__(self, index: Any) -> Rotation:
+ """
+ Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape
+ property.
+
+ Args:
+ index:
+ A torch index. E.g. (1, 3, 2), or (slice(None,))
+ Returns:
+ The indexed rotation
+ """
+ if type(index) != tuple:
+ index = (index,)
+
+ if self._rot_mats is not None:
+ rot_mats = self._rot_mats[index + (slice(None), slice(None))]
+ return Rotation(rot_mats=rot_mats)
+ elif self._quats is not None:
+ quats = self._quats[index + (slice(None),)]
+ return Rotation(quats=quats, normalize_quats=False)
+ else:
+ raise ValueError("Both rotations are None")
+
+ def __mul__(self, right: torch.Tensor) -> Rotation:
+ """
+ Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.
+
+ Args:
+ right:
+ The tensor multiplicand
+ Returns:
+ The product
+ """
+ if not (isinstance(right, torch.Tensor)):
+ raise TypeError("The other multiplicand must be a Tensor")
+
+ if self._rot_mats is not None:
+ rot_mats = self._rot_mats * right[..., None, None]
+ return Rotation(rot_mats=rot_mats, quats=None)
+ elif self._quats is not None:
+ quats = self._quats * right[..., None]
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
+ else:
+ raise ValueError("Both rotations are None")
+
+ def __rmul__(self, left: torch.Tensor) -> Rotation:
+ """
+ Reverse pointwise multiplication of the rotation with a tensor.
+
+ Args:
+ left:
+ The left multiplicand
+ Returns:
+ The product
+ """
+ return self.__mul__(left)
+
+ # Properties
+
+ @property
+ def shape(self) -> torch.Size:
+ """
+ Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the
+ underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
+ tensor, for example, the resulting shape would be [10].
+
+ Returns:
+ The virtual shape of the rotation object
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats.shape[:-2]
+ elif self._quats is not None:
+ return self._quats.shape[:-1]
+ else:
+ raise ValueError("Both rotations are None")
+
+ @property
+ def dtype(self) -> torch.dtype:
+ """
+ Returns the dtype of the underlying rotation.
+
+ Returns:
+ The dtype of the underlying rotation
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats.dtype
+ elif self._quats is not None:
+ return self._quats.dtype
+ else:
+ raise ValueError("Both rotations are None")
+
+ @property
+ def device(self) -> torch.device:
+ """
+ The device of the underlying rotation
+
+ Returns:
+ The device of the underlying rotation
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats.device
+ elif self._quats is not None:
+ return self._quats.device
+ else:
+ raise ValueError("Both rotations are None")
+
+ @property
+ def requires_grad(self) -> bool:
+ """
+ Returns the requires_grad property of the underlying rotation
+
+ Returns:
+ The requires_grad property of the underlying tensor
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats.requires_grad
+ elif self._quats is not None:
+ return self._quats.requires_grad
+ else:
+ raise ValueError("Both rotations are None")
+
+ def get_rot_mats(self) -> torch.Tensor:
+ """
+ Returns the underlying rotation as a rotation matrix tensor.
+
+ Returns:
+ The rotation as a rotation matrix tensor
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats
+ elif self._quats is not None:
+ return quat_to_rot(self._quats)
+ else:
+ raise ValueError("Both rotations are None")
+
+ def get_quats(self) -> torch.Tensor:
+ """
+ Returns the underlying rotation as a quaternion tensor.
+
+ Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.
+
+ Returns:
+ The rotation as a quaternion tensor.
+ """
+ if self._rot_mats is not None:
+ return rot_to_quat(self._rot_mats)
+ elif self._quats is not None:
+ return self._quats
+ else:
+ raise ValueError("Both rotations are None")
+
+ def get_cur_rot(self) -> torch.Tensor:
+ """
+ Return the underlying rotation in its current form
+
+ Returns:
+ The stored rotation
+ """
+ if self._rot_mats is not None:
+ return self._rot_mats
+ elif self._quats is not None:
+ return self._quats
+ else:
+ raise ValueError("Both rotations are None")
+
+ # Rotation functions
+
+ def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool = True) -> Rotation:
+ """
+ Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
+        update, formatted as a [*, 3] tensor whose three columns represent x, y, z such that (1, x, y, z) is the
+ desired (not necessarily unit) quaternion update.
+
+ Args:
+ q_update_vec:
+ A [*, 3] quaternion update tensor
+ normalize_quats:
+ Whether to normalize the output quaternion
+ Returns:
+ An updated Rotation
+ """
+ quats = self.get_quats()
+ new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
+ return Rotation(
+ rot_mats=None,
+ quats=new_quats,
+ normalize_quats=normalize_quats,
+ )
+
+ def compose_r(self, r: Rotation) -> Rotation:
+ """
+ Compose the rotation matrices of the current Rotation object with those of another.
+
+ Args:
+ r:
+ An update rotation object
+ Returns:
+ An updated rotation object
+ """
+ r1 = self.get_rot_mats()
+ r2 = r.get_rot_mats()
+ new_rot_mats = rot_matmul(r1, r2)
+ return Rotation(rot_mats=new_rot_mats, quats=None)
+
+ def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:
+ """
+ Compose the quaternions of the current Rotation object with those of another.
+
+ Depending on whether either Rotation was initialized with quaternions, this function may call
+ torch.linalg.eigh.
+
+ Args:
+            r:
+                An update rotation object
+            normalize_quats:
+                Whether to normalize the output quaternion
+ Returns:
+ An updated rotation object
+ """
+ q1 = self.get_quats()
+ q2 = r.get_quats()
+ new_quats = quat_multiply(q1, q2)
+ return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)
+
+ def apply(self, pts: torch.Tensor) -> torch.Tensor:
+ """
+ Apply the current Rotation as a rotation matrix to a set of 3D coordinates.
+
+ Args:
+ pts:
+ A [*, 3] set of points
+ Returns:
+ [*, 3] rotated points
+ """
+ rot_mats = self.get_rot_mats()
+ return rot_vec_mul(rot_mats, pts)
+
+ def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
+ """
+ The inverse of the apply() method.
+
+ Args:
+ pts:
+ A [*, 3] set of points
+ Returns:
+ [*, 3] inverse-rotated points
+ """
+ rot_mats = self.get_rot_mats()
+ inv_rot_mats = invert_rot_mat(rot_mats)
+ return rot_vec_mul(inv_rot_mats, pts)
+
+ def invert(self) -> Rotation:
+ """
+ Returns the inverse of the current Rotation.
+
+ Returns:
+ The inverse of the current Rotation
+ """
+ if self._rot_mats is not None:
+ return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
+ elif self._quats is not None:
+ return Rotation(
+ rot_mats=None,
+ quats=invert_quat(self._quats),
+ normalize_quats=False,
+ )
+ else:
+ raise ValueError("Both rotations are None")
+
+ # "Tensor" stuff
+
+ def unsqueeze(self, dim: int) -> Rotation:
+ """
+ Analogous to torch.unsqueeze. The dimension is relative to the shape of the Rotation object.
+
+ Args:
+ dim: A positive or negative dimension index.
+ Returns:
+ The unsqueezed Rotation.
+ """
+ if dim >= len(self.shape):
+ raise ValueError("Invalid dimension")
+
+ if self._rot_mats is not None:
+ rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
+ return Rotation(rot_mats=rot_mats, quats=None)
+ elif self._quats is not None:
+ quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
+ else:
+ raise ValueError("Both rotations are None")
+
+ @staticmethod
+ def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
+ """
+ Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().
+
+ Note that the output of this operation is always a rotation matrix, regardless of the format of input
+ rotations.
+
+ Args:
+ rs:
+ A list of rotation objects
+ dim:
+ The dimension along which the rotations should be concatenated
+ Returns:
+ A concatenated Rotation object in rotation matrix format
+ """
+ rot_mats = torch.cat(
+ [r.get_rot_mats() for r in rs],
+ dim=dim if dim >= 0 else dim - 2,
+ )
+
+ return Rotation(rot_mats=rot_mats, quats=None)
+
+ def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:
+ """
+ Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s). Can
+ be used e.g. to sum out a one-hot batch dimension.
+
+ Args:
+ fn:
+ A Tensor -> Tensor function to be mapped over the Rotation
+ Returns:
+ The transformed Rotation object
+ """
+ if self._rot_mats is not None:
+ rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
+ rot_mats = torch.stack(list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1)
+ rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))
+ return Rotation(rot_mats=rot_mats, quats=None)
+ elif self._quats is not None:
+ quats = torch.stack(list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1)
+ return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
+ else:
+ raise ValueError("Both rotations are None")
+
+ def cuda(self) -> Rotation:
+ """
+ Analogous to the cuda() method of torch Tensors
+
+ Returns:
+ A copy of the Rotation in CUDA memory
+ """
+ if self._rot_mats is not None:
+ return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)
+ elif self._quats is not None:
+ return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)
+ else:
+ raise ValueError("Both rotations are None")
+
+ def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation:
+ """
+ Analogous to the to() method of torch Tensors
+
+ Args:
+ device:
+ A torch device
+ dtype:
+ A torch dtype
+ Returns:
+ A copy of the Rotation using the new device and dtype
+ """
+ if self._rot_mats is not None:
+ return Rotation(
+ rot_mats=self._rot_mats.to(device=device, dtype=dtype),
+ quats=None,
+ )
+ elif self._quats is not None:
+ return Rotation(
+ rot_mats=None,
+ quats=self._quats.to(device=device, dtype=dtype),
+ normalize_quats=False,
+ )
+ else:
+ raise ValueError("Both rotations are None")
+
+ def detach(self) -> Rotation:
+ """
+ Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.
+
+ Returns:
+ A copy of the Rotation whose underlying Tensor has been detached from its torch graph
+ """
+ if self._rot_mats is not None:
+ return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
+ elif self._quats is not None:
+ return Rotation(
+ rot_mats=None,
+ quats=self._quats.detach(),
+ normalize_quats=False,
+ )
+ else:
+ raise ValueError("Both rotations are None")
+
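+
+# Illustrative sketch, not part of the library API: composing a rotation-matrix Rotation
+# with an identity Rotation and applying the result to points should leave the points
+# unchanged (up to numerical precision), since both factors here are identities.
+def _example_rotation_usage() -> None:
+    rot = Rotation(rot_mats=torch.eye(3).expand(5, 3, 3), quats=None)
+    composed = rot.compose_r(Rotation.identity((5,), fmt="rot_mat"))
+    pts = torch.randn(5, 3)
+    print(torch.allclose(composed.apply(pts), pts, atol=1e-6))
+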
+
+class Rigid:
+ """
+ A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
+    [*, 3] translation. It is designed to behave approximately like a single torch tensor with the shape of the
+    shared batch dimensions of its component parts.
+ """
+
+ def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]):
+ """
+ Args:
+            rots: A Rotation object
+ trans: A corresponding [*, 3] translation tensor
+ """
+ # (we need device, dtype, etc. from at least one input)
+
+ batch_dims, dtype, device, requires_grad = None, None, None, None
+ if trans is not None:
+ batch_dims = trans.shape[:-1]
+ dtype = trans.dtype
+ device = trans.device
+ requires_grad = trans.requires_grad
+ elif rots is not None:
+ batch_dims = rots.shape
+ dtype = rots.dtype
+ device = rots.device
+ requires_grad = rots.requires_grad
+ else:
+ raise ValueError("At least one input argument must be specified")
+
+ if rots is None:
+ rots = Rotation.identity(
+ batch_dims,
+ dtype,
+ device,
+ requires_grad,
+ )
+ elif trans is None:
+ trans = identity_trans(
+ batch_dims,
+ dtype,
+ device,
+ requires_grad,
+ )
+
+ assert rots is not None
+ assert trans is not None
+
+ if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):
+ raise ValueError("Rots and trans incompatible")
+
+ # Force full precision. Happens to the rotations automatically.
+ trans = trans.to(dtype=torch.float32)
+
+ self._rots = rots
+ self._trans = trans
+
+ @staticmethod
+ def identity(
+ shape: Tuple[int, ...],
+ dtype: Optional[torch.dtype] = None,
+ device: Optional[torch.device] = None,
+ requires_grad: bool = True,
+ fmt: str = "quat",
+ ) -> Rigid:
+ """
+ Constructs an identity transformation.
+
+ Args:
+ shape:
+ The desired shape
+ dtype:
+ The dtype of both internal tensors
+ device:
+ The device of both internal tensors
+ requires_grad:
+ Whether grad should be enabled for the internal tensors
+ Returns:
+ The identity transformation
+ """
+ return Rigid(
+ Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),
+ identity_trans(shape, dtype, device, requires_grad),
+ )
+
+ def __getitem__(self, index: Any) -> Rigid:
+ """
+ Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of
+ both the rotation and the translation.
+
+ E.g.::
+
+            r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
+            t = Rigid(r, torch.rand(10, 10, 3))
+            indexed = t[3, 4:6]
+            assert indexed.shape == (2,)
+            assert indexed.get_rots().shape == (2,)
+            assert indexed.get_trans().shape == (2, 3)
+
+ Args:
+ index: A standard torch tensor index. E.g. 8, (10, None, 3),
+ or (3, slice(0, 1, None))
+ Returns:
+ The indexed tensor
+ """
+ if type(index) != tuple:
+ index = (index,)
+
+ return Rigid(
+ self._rots[index],
+ self._trans[index + (slice(None),)],
+ )
+
+ def __mul__(self, right: torch.Tensor) -> Rigid:
+ """
+ Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.
+
+ Args:
+ right:
+ The tensor multiplicand
+ Returns:
+ The product
+ """
+ if not (isinstance(right, torch.Tensor)):
+ raise TypeError("The other multiplicand must be a Tensor")
+
+ new_rots = self._rots * right
+ new_trans = self._trans * right[..., None]
+
+ return Rigid(new_rots, new_trans)
+
+ def __rmul__(self, left: torch.Tensor) -> Rigid:
+ """
+ Reverse pointwise multiplication of the transformation with a tensor.
+
+ Args:
+ left:
+ The left multiplicand
+ Returns:
+ The product
+ """
+ return self.__mul__(left)
+
+ @property
+ def shape(self) -> torch.Size:
+ """
+ Returns the shape of the shared dimensions of the rotation and the translation.
+
+ Returns:
+ The shape of the transformation
+ """
+ return self._trans.shape[:-1]
+
+ @property
+ def device(self) -> torch.device:
+ """
+ Returns the device on which the Rigid's tensors are located.
+
+ Returns:
+ The device on which the Rigid's tensors are located
+ """
+ return self._trans.device
+
+ def get_rots(self) -> Rotation:
+ """
+ Getter for the rotation.
+
+ Returns:
+ The rotation object
+ """
+ return self._rots
+
+ def get_trans(self) -> torch.Tensor:
+ """
+ Getter for the translation.
+
+ Returns:
+ The stored translation
+ """
+ return self._trans
+
+ def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
+ """
+        Composes the transformation with a quaternion update vector of shape [*, 6], whose first three columns are
+        the x, y, and z values of a quaternion update of form (1, x, y, z) and whose last three columns are a 3D
+        translation.
+
+ Args:
+            q_update_vec: The quaternion update vector.
+ Returns:
+ The composed transformation.
+ """
+ q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
+ new_rots = self._rots.compose_q_update_vec(q_vec)
+
+ trans_update = self._rots.apply(t_vec)
+ new_translation = self._trans + trans_update
+
+ return Rigid(new_rots, new_translation)
+
+ def compose(self, r: Rigid) -> Rigid:
+ """
+ Composes the current rigid object with another.
+
+ Args:
+ r:
+ Another Rigid object
+ Returns:
+ The composition of the two transformations
+ """
+ new_rot = self._rots.compose_r(r._rots)
+ new_trans = self._rots.apply(r._trans) + self._trans
+ return Rigid(new_rot, new_trans)
+
+ def apply(self, pts: torch.Tensor) -> torch.Tensor:
+ """
+ Applies the transformation to a coordinate tensor.
+
+ Args:
+ pts: A [*, 3] coordinate tensor.
+ Returns:
+ The transformed points.
+ """
+ rotated = self._rots.apply(pts)
+ return rotated + self._trans
+
+ def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
+ """
+ Applies the inverse of the transformation to a coordinate tensor.
+
+ Args:
+ pts: A [*, 3] coordinate tensor
+ Returns:
+ The transformed points.
+ """
+ pts = pts - self._trans
+ return self._rots.invert_apply(pts)
+
+ def invert(self) -> Rigid:
+ """
+ Inverts the transformation.
+
+ Returns:
+ The inverse transformation.
+ """
+ rot_inv = self._rots.invert()
+ trn_inv = rot_inv.apply(self._trans)
+
+ return Rigid(rot_inv, -1 * trn_inv)
+
+ def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
+ """
+ Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
+ translation/rotation dimensions respectively.
+
+ Args:
+ fn:
+ A Tensor -> Tensor function to be mapped over the Rigid
+ Returns:
+ The transformed Rigid object
+ """
+ new_rots = self._rots.map_tensor_fn(fn)
+ new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)
+
+ return Rigid(new_rots, new_trans)
+
+ def to_tensor_4x4(self) -> torch.Tensor:
+ """
+        Converts a transformation to a homogeneous transformation tensor.
+
+        Returns:
+            A [*, 4, 4] homogeneous transformation tensor
+ """
+ tensor = self._trans.new_zeros((*self.shape, 4, 4))
+ tensor[..., :3, :3] = self._rots.get_rot_mats()
+ tensor[..., :3, 3] = self._trans
+ tensor[..., 3, 3] = 1
+ return tensor
+
+ @staticmethod
+ def from_tensor_4x4(t: torch.Tensor) -> Rigid:
+ """
+        Constructs a transformation from a homogeneous transformation tensor.
+
+        Args:
+            t: [*, 4, 4] homogeneous transformation tensor
+        Returns:
+            A Rigid object with shape [*]
+ """
+ if t.shape[-2:] != (4, 4):
+ raise ValueError("Incorrectly shaped input tensor")
+
+ rots = Rotation(rot_mats=t[..., :3, :3], quats=None)
+ trans = t[..., :3, 3]
+
+ return Rigid(rots, trans)
+
+ def to_tensor_7(self) -> torch.Tensor:
+ """
+ Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
+ translation.
+
+ Returns:
+ A [*, 7] tensor representation of the transformation
+ """
+ tensor = self._trans.new_zeros((*self.shape, 7))
+ tensor[..., :4] = self._rots.get_quats()
+ tensor[..., 4:] = self._trans
+
+ return tensor
+
+ @staticmethod
+ def from_tensor_7(t: torch.Tensor, normalize_quats: bool = False) -> Rigid:
+ if t.shape[-1] != 7:
+ raise ValueError("Incorrectly shaped input tensor")
+
+ quats, trans = t[..., :4], t[..., 4:]
+
+ rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)
+
+ return Rigid(rots, trans)
+
+ @staticmethod
+ def from_3_points(
+ p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float = 1e-8
+ ) -> Rigid:
+ """
+ Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.
+
+ Args:
+ p_neg_x_axis: [*, 3] coordinates
+ origin: [*, 3] coordinates used as frame origins
+ p_xy_plane: [*, 3] coordinates
+ eps: Small epsilon value
+ Returns:
+ A transformation object of shape [*]
+ """
+ p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)
+ origin_unbound = torch.unbind(origin, dim=-1)
+ p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)
+
+ e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]
+ e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]
+
+ denom = torch.sqrt(sum(c * c for c in e0) + eps * torch.ones_like(e0[0]))
+ e0 = [c / denom for c in e0]
+ dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
+ e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
+ denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0]))
+ e1 = [c / denom for c in e1]
+ e2 = [
+ e0[1] * e1[2] - e0[2] * e1[1],
+ e0[2] * e1[0] - e0[0] * e1[2],
+ e0[0] * e1[1] - e0[1] * e1[0],
+ ]
+
+ rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
+ rots = rots.reshape(rots.shape[:-1] + (3, 3))
+
+ rot_obj = Rotation(rot_mats=rots, quats=None)
+
+ return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))
+
+ def unsqueeze(self, dim: int) -> Rigid:
+ """
+ Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the rotation/translation.
+
+ Args:
+ dim: A positive or negative dimension index.
+ Returns:
+ The unsqueezed transformation.
+ """
+ if dim >= len(self.shape):
+ raise ValueError("Invalid dimension")
+ rots = self._rots.unsqueeze(dim)
+ trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
+
+ return Rigid(rots, trans)
+
+ @staticmethod
+ def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
+ """
+        Concatenates transformations along one of the batch dimensions. Analogous to torch.cat().
+
+ Args:
+ ts:
+                A list of Rigid objects
+ dim:
+ The dimension along which the transformations should be concatenated
+ Returns:
+ A concatenated transformation object
+ """
+ rots = Rotation.cat([t._rots for t in ts], dim)
+ trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1)
+
+ return Rigid(rots, trans)
+
+ def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
+ """
+ Applies a Rotation -> Rotation function to the stored rotation object.
+
+ Args:
+ fn: A function of type Rotation -> Rotation
+ Returns:
+ A transformation object with a transformed rotation.
+ """
+ return Rigid(fn(self._rots), self._trans)
+
+ def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
+ """
+ Applies a Tensor -> Tensor function to the stored translation.
+
+ Args:
+ fn:
+ A function of type Tensor -> Tensor to be applied to the translation
+ Returns:
+ A transformation object with a transformed translation.
+ """
+ return Rigid(self._rots, fn(self._trans))
+
+ def scale_translation(self, trans_scale_factor: float) -> Rigid:
+ """
+ Scales the translation by a constant factor.
+
+ Args:
+ trans_scale_factor:
+ The constant factor
+ Returns:
+ A transformation object with a scaled translation.
+ """
+ return self.apply_trans_fn(lambda t: t * trans_scale_factor)
+
+ def stop_rot_gradient(self) -> Rigid:
+ """
+ Detaches the underlying rotation object
+
+ Returns:
+ A transformation object with detached rotations
+ """
+ return self.apply_rot_fn(lambda r: r.detach())
+
+ @staticmethod
+ def make_transform_from_reference(
+ n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float = 1e-20
+ ) -> Rigid:
+ """
+ Returns a transformation object from reference coordinates.
+
+ Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard
+ way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You
+ need to take care of such cases in your code.
+
+ Args:
+ n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
+ ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
+ c_xyz: A [*, 3] tensor of carbon xyz coordinates.
+ Returns:
+            A transformation object. After applying the translation and rotation to the reference backbone, the
+            coordinates will be approximately equal to the input coordinates.
+ """
+ translation = -1 * ca_xyz
+ n_xyz = n_xyz + translation
+ c_xyz = c_xyz + translation
+
+ c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
+ norm = torch.sqrt(eps + c_x**2 + c_y**2)
+ sin_c1 = -c_y / norm
+ cos_c1 = c_x / norm
+
+ c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
+ c1_rots[..., 0, 0] = cos_c1
+ c1_rots[..., 0, 1] = -1 * sin_c1
+ c1_rots[..., 1, 0] = sin_c1
+ c1_rots[..., 1, 1] = cos_c1
+ c1_rots[..., 2, 2] = 1
+
+ norm = torch.sqrt(eps + c_x**2 + c_y**2 + c_z**2)
+ sin_c2 = c_z / norm
+ cos_c2 = torch.sqrt(c_x**2 + c_y**2) / norm
+
+ c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
+ c2_rots[..., 0, 0] = cos_c2
+ c2_rots[..., 0, 2] = sin_c2
+ c2_rots[..., 1, 1] = 1
+ c2_rots[..., 2, 0] = -1 * sin_c2
+ c2_rots[..., 2, 2] = cos_c2
+
+ c_rots = rot_matmul(c2_rots, c1_rots)
+ n_xyz = rot_vec_mul(c_rots, n_xyz)
+
+ _, n_y, n_z = [n_xyz[..., i] for i in range(3)]
+ norm = torch.sqrt(eps + n_y**2 + n_z**2)
+ sin_n = -n_z / norm
+ cos_n = n_y / norm
+
+ n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
+ n_rots[..., 0, 0] = 1
+ n_rots[..., 1, 1] = cos_n
+ n_rots[..., 1, 2] = -1 * sin_n
+ n_rots[..., 2, 1] = sin_n
+ n_rots[..., 2, 2] = cos_n
+
+ rots = rot_matmul(n_rots, c_rots)
+
+ rots = rots.transpose(-1, -2)
+ translation = -1 * translation
+
+ rot_obj = Rotation(rot_mats=rots, quats=None)
+
+ return Rigid(rot_obj, translation)
+
+ def cuda(self) -> Rigid:
+ """
+ Moves the transformation object to GPU memory
+
+ Returns:
+ A version of the transformation on GPU
+ """
+ return Rigid(self._rots.cuda(), self._trans.cuda())
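+
+
+# Illustrative sketch, not part of the library API: a Rigid built from a homogeneous 4x4
+# tensor should round-trip through to_tensor_4x4(), and invert_apply() should undo
+# apply() on a set of points.
+def _example_rigid_roundtrip() -> None:
+    t4 = torch.eye(4).expand(2, 4, 4)
+    rigid = Rigid.from_tensor_4x4(t4)
+    print(torch.allclose(rigid.to_tensor_4x4(), t4))
+    pts = torch.randn(2, 3)
+    print(torch.allclose(rigid.invert_apply(rigid.apply(pts)), pts, atol=1e-5))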
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/tensor_utils.py b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/tensor_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..99dd6dbe47b68247794e51810fd274c6352e5b4f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/openfold_utils/tensor_utils.py
@@ -0,0 +1,144 @@
+# Copyright 2021 AlQuraishi Laboratory
+# Copyright 2021 DeepMind Technologies Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from functools import partial
+from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload
+
+import torch
+import torch.nn as nn
+import torch.types
+
+
+def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor:
+ # The first operation in a checkpoint can't be in-place, but it's
+ # nice to have in-place addition during inference. Thus...
+ if not inplace:
+ m1 = m1 + m2
+ else:
+ m1 += m2
+
+ return m1
+
+
+def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor:
+ zero_index = -1 * len(inds)
+ first_inds = list(range(len(tensor.shape[:zero_index])))
+ return tensor.permute(first_inds + [zero_index + i for i in inds])
+
+
+def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor:
+ return t.reshape(t.shape[:-no_dims] + (-1,))
+
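+
+# Illustrative sketch, not part of the library API: both helpers only touch the trailing
+# dimensions of a tensor and leave the leading (batch) dimensions alone.
+def _example_final_dim_helpers() -> None:
+    t = torch.randn(2, 3, 4, 5)
+    print(permute_final_dims(t, [2, 0, 1]).shape)  # torch.Size([2, 5, 3, 4])
+    print(flatten_final_dims(t, 2).shape)  # torch.Size([2, 3, 20])
+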
+
+def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float = 1e-4) -> torch.Tensor:
+ mask = mask.expand(*value.shape)
+ return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim))
+
+
+def pts_to_distogram(
+ pts: torch.Tensor, min_bin: torch.types.Number = 2.3125, max_bin: torch.types.Number = 21.6875, no_bins: int = 64
+) -> torch.Tensor:
+ boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device)
+ dists = torch.sqrt(torch.sum((pts.unsqueeze(-2) - pts.unsqueeze(-3)) ** 2, dim=-1))
+ return torch.bucketize(dists, boundaries)
+
+
+def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict:
+ first = dicts[0]
+ new_dict = {}
+ for k, v in first.items():
+ all_v = [d[k] for d in dicts]
+ if isinstance(v, dict):
+ new_dict[k] = dict_multimap(fn, all_v)
+ else:
+ new_dict[k] = fn(all_v)
+
+ return new_dict
+
+
+def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor:
+ reshaped_bins = v_bins.view(((1,) * len(x.shape)) + (len(v_bins),))
+ diffs = x[..., None] - reshaped_bins
+ am = torch.argmin(torch.abs(diffs), dim=-1)
+ return nn.functional.one_hot(am, num_classes=len(v_bins)).float()
+
+
+def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int = 0, no_batch_dims: int = 0) -> torch.Tensor:
+ ranges: List[Union[slice, torch.Tensor]] = []
+ for i, s in enumerate(data.shape[:no_batch_dims]):
+ r = torch.arange(s)
+ r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1))))
+ ranges.append(r)
+
+ remaining_dims: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)]
+ remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds
+ ranges.extend(remaining_dims)
+ # Matt note: Editing this to get around the behaviour of using a list as an array index changing
+ # in recent Numpy versions
+ return data[tuple(ranges)]
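+
+
+# Illustrative sketch, not part of the library API: batched_gather selects entries along
+# `dim` using a per-batch index tensor, so each batch element can gather a different set
+# of rows.
+def _example_batched_gather() -> None:
+    data = torch.randn(2, 5, 3)
+    inds = torch.tensor([[0, 1, 1, 4], [2, 2, 3, 0]])
+    print(batched_gather(data, inds, dim=1, no_batch_dims=1).shape)  # torch.Size([2, 4, 3])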
+
+
+T = TypeVar("T")
+
+
+# With tree_map, a poor man's JAX tree_map
+def dict_map(
+ fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T]
+) -> Dict[Any, Union[dict, list, tuple, Any]]:
+ new_dict: Dict[Any, Union[dict, list, tuple, Any]] = {}
+ for k, v in dic.items():
+ if isinstance(v, dict):
+ new_dict[k] = dict_map(fn, v, leaf_type)
+ else:
+ new_dict[k] = tree_map(fn, v, leaf_type)
+
+ return new_dict
+
+
+@overload
+def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any:
+ ...
+
+
+@overload
+def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict:
+ ...
+
+
+@overload
+def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list:
+ ...
+
+
+@overload
+def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple:
+ ...
+
+
+def tree_map(fn, tree, leaf_type):
+ if isinstance(tree, dict):
+ return dict_map(fn, tree, leaf_type)
+ elif isinstance(tree, list):
+ return [tree_map(fn, x, leaf_type) for x in tree]
+ elif isinstance(tree, tuple):
+ return tuple(tree_map(fn, x, leaf_type) for x in tree)
+ elif isinstance(tree, leaf_type):
+ return fn(tree)
+ else:
+ print(type(tree))
+ raise ValueError("Not supported")
+
+
+tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
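+
+
+# Illustrative sketch, not part of the library API: tensor_tree_map applies a
+# Tensor -> Tensor function to every tensor leaf of a nested dict/list/tuple structure.
+def _example_tensor_tree_map() -> None:
+    tree = {"a": torch.zeros(2), "b": [torch.ones(3), (torch.ones(1),)]}
+    doubled = tensor_tree_map(lambda t: t * 2, tree)
+    print(doubled["b"][0])  # tensor([2., 2., 2.])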
diff --git a/venv/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py b/venv/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py
new file mode 100644
index 0000000000000000000000000000000000000000..27a889c87ea0b42397ed1553608aa2e5db2f85bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/esm/tokenization_esm.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for ESM."""
+import os
+from typing import List, Optional
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+def load_vocab_file(vocab_file):
+ with open(vocab_file, "r") as f:
+ lines = f.read().splitlines()
+ return [l.strip() for l in lines]
+
+
+class EsmTokenizer(PreTrainedTokenizer):
+ """
+ Constructs an ESM tokenizer.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+        unk_token="<unk>",
+        cls_token="<cls>",
+        pad_token="<pad>",
+        mask_token="<mask>",
+        eos_token="<eos>",
+ **kwargs,
+ ):
+ self.all_tokens = load_vocab_file(vocab_file)
+ self._id_to_token = dict(enumerate(self.all_tokens))
+ self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
+ super().__init__(
+ unk_token=unk_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ eos_token=eos_token,
+ **kwargs,
+ )
+
+ # TODO, all the tokens are added? But they are also part of the vocab... bit strange.
+ # none of them are special, but they all need special splitting.
+
+ self.unique_no_split_tokens = self.all_tokens
+ self._update_trie(self.unique_no_split_tokens)
+
+ def _convert_id_to_token(self, index: int) -> str:
+ return self._id_to_token.get(index, self.unk_token)
+
+ def _convert_token_to_id(self, token: str) -> int:
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
+
+ def _tokenize(self, text, **kwargs):
+ return text.split()
+
+ def get_vocab(self):
+ base_vocab = self._token_to_id.copy()
+ base_vocab.update(self.added_tokens_encoder)
+ return base_vocab
+
+ def token_to_id(self, token: str) -> int:
+ return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))
+
+ def id_to_token(self, index: int) -> str:
+ return self._id_to_token.get(index, self.unk_token)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ cls = [self.cls_token_id]
+ sep = [self.eos_token_id] # No sep token in ESM vocabulary
+ if token_ids_1 is None:
+ if self.eos_token_id is None:
+ return cls + token_ids_0
+ else:
+ return cls + token_ids_0 + sep
+ elif self.eos_token_id is None:
+ raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
+ return cls + token_ids_0 + sep + token_ids_1 + sep # Multiple inputs always have an EOS token
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids of the first sequence.
+ token_ids_1 (`List[int]`, *optional*):
+ List of ids of the second sequence.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ if token_ids_1 is not None:
+ raise ValueError(
+ "You should not supply a second sequence if the provided sequence of "
+ "ids is already formatted with special tokens for the model."
+ )
+
+ return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
+ mask = [1] + ([0] * len(token_ids_0)) + [1]
+ if token_ids_1 is not None:
+ mask += [0] * len(token_ids_1) + [1]
+ return mask
+
+ def save_vocabulary(self, save_directory, filename_prefix):
+ vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
+ with open(vocab_file, "w") as f:
+ f.write("\n".join(self.all_tokens))
+ return (vocab_file,)
+
+ @property
+ def vocab_size(self) -> int:
+ return len(self.all_tokens)
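+
+
+# Illustrative sketch, not part of the library API: the tokenizer reads one token per
+# line from a plain-text vocab file. The vocabulary below is a made-up toy vocabulary,
+# not the real ESM vocabulary, so the exact ids depend on its ordering.
+def _example_esm_tokenizer(tmp_dir: str) -> None:
+    vocab_path = os.path.join(tmp_dir, "vocab.txt")
+    with open(vocab_path, "w") as f:
+        f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
+    tokenizer = EsmTokenizer(vocab_file=vocab_path)
+    # Expected to be [cls_id, L_id, A_id, G_id, eos_id] for the toy vocabulary above.
+    print(tokenizer("L A G")["input_ids"])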
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c73bee0551d8c323a58643e00bdfac3b9d4f5fb2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/phi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba79ac81a6b9e55c3881c27d7772109113b9d803
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/phi/__init__.py
@@ -0,0 +1,69 @@
+# Copyright 2023 Microsoft and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_phi": ["PHI_PRETRAINED_CONFIG_ARCHIVE_MAP", "PhiConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_phi"] = [
+ "PHI_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "PhiPreTrainedModel",
+ "PhiModel",
+ "PhiForCausalLM",
+ "PhiForSequenceClassification",
+ "PhiForTokenClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_phi import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP, PhiConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_phi import (
+ PHI_PRETRAINED_MODEL_ARCHIVE_LIST,
+ PhiForCausalLM,
+ PhiForSequenceClassification,
+ PhiForTokenClassification,
+ PhiModel,
+ PhiPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a14689db1a6bdf6e196603b9d3c55a1b3b77331
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e66075c7c4bf160a78cccffdfe98e03857a49ec6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/configuration_phi.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d26a12853edf38cee62e651baa003e167ea6de41
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/convert_phi_weights_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4105c18732cf0ef88ba4aa8ddc980f524773fec5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/phi/__pycache__/modeling_phi.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/configuration_phi.py b/venv/lib/python3.10/site-packages/transformers/models/phi/configuration_phi.py
new file mode 100644
index 0000000000000000000000000000000000000000..59d63ae65da062190888853603afa7a56642c43d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/phi/configuration_phi.py
@@ -0,0 +1,191 @@
+# coding=utf-8
+# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" Phi model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import PHI_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class PhiConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate a Phi
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Phi
+ [microsoft/phi-1](https://huggingface.co/microsoft/phi-1).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 51200):
+ Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`PhiModel`].
+ hidden_size (`int`, *optional*, defaults to 2048):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 8192):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details checkout [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+ `num_attention_heads`.
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
+ Dropout probability for mlp outputs.
+        embd_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the embeddings.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio after computing the attention scores.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+            The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 support up to 2048
+            tokens.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the layer normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie weight embeddings
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+ is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+ is an experimental feature, subject to breaking API changes in future versions.
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+ Percentage of the query and keys which will have rotary embedding.
+ qk_layernorm (`bool`, *optional*, defaults to `False`):
+ Whether or not to normalize the Queries and Keys after projecting the hidden states.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Denotes the beginning-of-sequence token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            Denotes the end-of-sequence token id.
+
+ Example:
+
+ ```python
+ >>> from transformers import PhiModel, PhiConfig
+
+ >>> # Initializing a Phi-1 style configuration
+ >>> configuration = PhiConfig.from_pretrained("microsoft/phi-1")
+
+ >>> # Initializing a model from the configuration
+ >>> model = PhiModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "phi"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=51200,
+ hidden_size=2048,
+ intermediate_size=8192,
+ num_hidden_layers=24,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ resid_pdrop=0.0,
+ embd_pdrop=0.0,
+ attention_dropout=0.0,
+ hidden_act="gelu_new",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ partial_rotary_factor=0.5,
+ qk_layernorm=False,
+ bos_token_id=1,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attention_dropout = attention_dropout
+ self.hidden_act = hidden_act
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self.partial_rotary_factor = partial_rotary_factor
+ self.qk_layernorm = qk_layernorm
+ self._rope_scaling_validation()
+
+ super().__init__(
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
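+
+
+# Illustrative sketch, not part of the library API: rope_scaling must be a two-field
+# dict with a supported type and a float factor > 1, otherwise construction raises.
+def _example_rope_scaling_validation() -> None:
+    PhiConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
+    try:
+        PhiConfig(rope_scaling={"type": "linear", "factor": 0.5})
+    except ValueError as err:
+        print(err)  # factor must be a float > 1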
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/convert_phi_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/phi/convert_phi_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..69ef4c5919ed9b4881158ee5d9fa5ef92c128d77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/phi/convert_phi_weights_to_hf.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Weights conversion script for Phi
+
+This script downloads the Phi-1, Phi-1.5 and Phi-2 checkpoints to "checkpoint_path" and then converts the weights to
+the HuggingFace model format and saves them in "pytorch_dump_folder_path".
+
+Example: python ./convert_phi_weights_to_hf.py --model_name "microsoft/phi-2" --pytorch_dump_folder_path ./dump_folder/ --checkpoint_path ./ckpt_path/
+"""
+
+import argparse
+import gc
+import os
+
+import safetensors
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import PhiConfig, PhiForCausalLM
+
+
+_MODELS = {
+ "microsoft/phi-1": ["https://huggingface.co/microsoft/phi-1/blob/main/pytorch_model.bin"],
+ "microsoft/phi-1_5": ["https://huggingface.co/microsoft/phi-1_5/blob/main/pytorch_model.bin"],
+ "microsoft/phi-2": [
+ "https://huggingface.co/microsoft/phi-2/blob/main/model-00001-of-00002.safetensors",
+ "https://huggingface.co/microsoft/phi-2/blob/main/model-00002-of-00002.safetensors",
+ ],
+}
+
+PHI_MAPPING = {
+ "transformer.embd.wte.weight": "model.embed_tokens.weight",
+ "lm_head.linear": "lm_head",
+ "lm_head.ln": "model.final_layernorm",
+ "layers": "model.layers",
+ "transformer": "model",
+ ".h.": ".layers.",
+ "ln": "input_layernorm",
+ "mixer": "self_attn",
+ "Wqkv": "query_key_value",
+ "out_proj": "dense",
+}
+
+
+def convert_weights(original_weights, mapping, config):
+ converted_weights = {}
+ original_weights_keys = sorted(original_weights.keys())
+
+ for original_weights_key in original_weights_keys:
+ new_key = original_weights_key
+
+ if "rotary_emb" in new_key:
+ continue
+
+ if "Wqkv" in new_key:
+ if "weight" in new_key:
+ weight = original_weights[new_key]
+ weights_shape = weight.shape
+ weight = (
+ weight.view(3, config.num_attention_heads, -1, config.hidden_size)
+ .transpose(0, 1)
+ .reshape(*weights_shape)
+ )
+ original_weights[new_key] = weight
+ elif "bias" in new_key:
+ bias = original_weights[new_key]
+ bias_shape = bias.shape
+ bias = bias.view(3, config.num_attention_heads, -1).transpose(0, 1).reshape(*bias_shape)
+ original_weights[new_key] = bias
+
+ for k, v in mapping.items():
+ if k in new_key:
+ new_key = new_key.replace(k, v)
+
+ converted_weights[new_key] = original_weights.pop(original_weights_key)
+
+ return converted_weights
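+
+
+# Illustrative sketch, not part of the conversion script's CLI: key renaming in
+# convert_weights is plain substring replacement driven by PHI_MAPPING. The key below is
+# a made-up example in the original checkpoint's naming scheme.
+def _example_key_renaming() -> None:
+    key = "layers.0.mixer.out_proj.weight"
+    for old, new in PHI_MAPPING.items():
+        if old in key:
+            key = key.replace(old, new)
+    print(key)  # model.layers.0.self_attn.dense.weight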
+
+
+def _download(url: str, root: str):
+ repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}"
+ filename = f"{url.split('/')[-1]}"
+ hf_hub_download(
+ repo_id=repo_id,
+ filename=filename,
+ force_filename=root,
+ local_dir_use_symlinks=False,
+ )
+
+
+def convert_phi_weights(
+ model_name, checkpoint_path, pytorch_dump_folder_path, use_cuda, save_weights_directly, _MODELS
+):
+ _MODELS = _MODELS if model_name not in _MODELS.keys() else {model_name: _MODELS.get(model_name)}
+ device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
+ for model_name, model_url in _MODELS.items():
+ converted_checkpoint = {}
+ model_checkpoint = {}
+
+        # For phi-2 the weights are stored in 2 different safetensors files, so we need to iterate over that list and download them one at a time
+ for model_each_url in model_url:
+ model_path = os.path.join(checkpoint_path, model_name + "_" + model_each_url.split("/")[-1])
+ if not os.path.exists(model_path):
+ print(f"\n{model_name} was not found! Downloading it to {model_path}")
+ _download(url=model_each_url, root=model_path)
+
+ if model_path.endswith("safetensors"):
+ loaded_weights = safetensors.torch.load_file(model_path, device=device)
+ else:
+ loaded_weights = torch.load(model_path, map_location=device)
+ model_checkpoint.update(**loaded_weights)
+
+ model_type = model_name.split("/")[1] # phi-1 or phi-1_5 or phi-2
+
+ # init the config for phi-1 and phi-1.5
+ config = PhiConfig()
+ # if we are dealing with phi-2 then update the config
+ if model_type == "phi-2":
+ config.hidden_size = 2560
+ config.intermediate_size = 10240
+ config.num_hidden_layers = 32
+ config.resid_pdrop = 0.1
+ config.partial_rotary_factor = 0.4
+ config.num_hidden_layers = 32
+ config.torch_dtype = "float16"
+
+ # Converting the weights
+ converted_checkpoint.update(**convert_weights(model_checkpoint, PHI_MAPPING, config))
+
+ # Save either the whole model or the converted weights
+ if save_weights_directly:
+ save_weights_path = os.path.join(pytorch_dump_folder_path, model_type + "_pytorch_model.bin")
+ torch.save(converted_checkpoint, save_weights_path)
+ print(f"Model weights saved at {save_weights_path}!")
+
+ else:
+ model = PhiForCausalLM(config).to(device)
+ model.load_state_dict(converted_checkpoint, strict=True)
+ save_model_path = os.path.join(pytorch_dump_folder_path, model_type)
+ model.save_pretrained(save_model_path)
+ print(f"Model saved at {save_model_path}!")
+
+ # release GPU memory for the 2nd model if cuda was used.
+ del config, model
+
+ # release GPU memory for the 2nd model if cuda was used.
+ del model_checkpoint, converted_checkpoint
+ if use_cuda:
+ torch.cuda.empty_cache()
+ gc.collect()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # # Required parameters
+ parser.add_argument(
+ "--model_name",
+ type=str,
+        help="Name of the model to convert. (Please enter one of the following: microsoft/phi-1, microsoft/phi-1_5, microsoft/phi-2). If nothing is provided, all models will be converted.",
+ default=None,
+ )
+ parser.add_argument(
+ "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)"
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ help="Path to the output PyTorch model. (Please enter full path)",
+ )
+ parser.add_argument(
+ "--use_cuda",
+ default=False,
+ type=bool,
+ help="Whether to load the weights on GPU during conversion or not, False by default",
+ )
+ parser.add_argument(
+ "--save_weights_directly",
+ default=True,
+ type=bool,
+ help="Whether to save the weights directly after conversion or load the weight to the Phi model and then save "
+ "the Phi model along with weights. True by default",
+ )
+
+ args = parser.parse_args()
+ convert_phi_weights(
+ args.model_name,
+ args.checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.use_cuda,
+ args.save_weights_directly,
+ _MODELS,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/phi/modeling_phi.py b/venv/lib/python3.10/site-packages/transformers/models/phi/modeling_phi.py
new file mode 100644
index 0000000000000000000000000000000000000000..13719166edf9d98fdda7b37fd14fe66b97648cce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/phi/modeling_phi.py
@@ -0,0 +1,1489 @@
+# coding=utf-8
+# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" PyTorch Phi model."""
+
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from packaging import version
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...modeling_attn_mask_utils import (
+ _prepare_4d_causal_attention_mask,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ get_torch_version,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_phi import PhiConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/phi-1"
+_CONFIG_FOR_DOC = "PhiConfig"
+
+
+from ..deprecated._archive_maps import PHI_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
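+
+ # Worked example (editorial note, not part of the upstream file): for a padded batch with
+ # attention_mask = [[1, 1, 0], [1, 1, 1]], the per-row lengths are [2, 3], the flattened indices
+ # of the real tokens are [0, 1, 3, 4, 5], max_seqlen_in_batch is 3, and cu_seqlens is [0, 2, 5] --
+ # exactly the "cumulative sequence lengths" layout that flash_attn_varlen_func expects.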
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Phi
+class PhiRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
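+
+ # Editorial note: cos_cached / sin_cached above have shape [max_seq_len_cached, dim] (with the
+ # frequencies duplicated along the last axis by the torch.cat), and forward() simply slices the
+ # first `seq_len` rows, rebuilding a larger cache on demand when a longer sequence shows up.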
+
+
+# Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->Phi
+class PhiLinearScalingRotaryEmbedding(PhiRotaryEmbedding):
+ """PhiRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+ t = t / self.scaling_factor
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+# Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->Phi
+class PhiDynamicNTKScalingRotaryEmbedding(PhiRotaryEmbedding):
+ """PhiRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
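+
+ # Editorial example: with head_dim = 4 and x = [x0, x1, x2, x3] along the last axis, rotate_half
+ # returns [-x2, -x3, x0, x1]; combined with the cos/sin caches this gives the usual
+ # "rotate pairs of dimensions" formulation of RoPE.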
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
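+
+ # Editorial shape sketch: with q, k of shape [batch, num_heads, seq_len, rotary_dim] and cos/sin of
+ # shape [max_seq_len, rotary_dim], cos[position_ids] / sin[position_ids] are [batch, seq_len, rotary_dim];
+ # unsqueeze_dim=1 inserts the missing head axis so they broadcast against q and k element-wise.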
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Phi
+class PhiMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
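+
+ # Editorial example: with 2 key/value heads, 8 attention heads and therefore n_rep = 4, a key tensor
+ # of shape (batch, 2, seq_len, head_dim) becomes (batch, 8, seq_len, head_dim), each KV head being
+ # repeated 4 times so grouped-query attention can share keys/values across query heads.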
+
+
+class PhiAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: PhiConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.partial_rotary_factor = config.partial_rotary_factor
+ self.is_causal = True
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
+
+ self.qk_layernorm = config.qk_layernorm
+ if self.qk_layernorm:
+ self.q_layernorm = nn.LayerNorm(
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
+ )
+ self.k_layernorm = nn.LayerNorm(
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
+ )
+
+ self._init_rope()
+
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = PhiRotaryEmbedding(
+ int(self.partial_rotary_factor * self.head_dim),
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling["type"]
+ scaling_factor = self.config.rope_scaling["factor"]
+ if scaling_type == "linear":
+ self.rotary_emb = PhiLinearScalingRotaryEmbedding(
+ int(self.partial_rotary_factor * self.head_dim),
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == "dynamic":
+ self.rotary_emb = PhiDynamicNTKScalingRotaryEmbedding(
+ int(self.partial_rotary_factor * self.head_dim),
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ else:
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.qk_layernorm:
+ query_states = self.q_layernorm(query_states)
+ key_states = self.k_layernorm(key_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ # Partial rotary embedding
+ query_rot, query_pass = (
+ query_states[..., : self.rotary_emb.dim],
+ query_states[..., self.rotary_emb.dim :],
+ )
+ key_rot, key_pass = (
+ key_states[..., : self.rotary_emb.dim],
+ key_states[..., self.rotary_emb.dim :],
+ )
+ # [batch_size, num_heads, seq_length, head_dim * config.partial_rotary_factor]
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
+
+ # [batch_size, num_heads, seq_length, head_dim]
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ # Queries and keys upcast to fp32 is required by Phi-2 to avoid overflow
+ attn_weights = torch.matmul(
+ query_states.to(torch.float32), key_states.to(torch.float32).transpose(2, 3)
+ ) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.dense(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class PhiFlashAttention2(PhiAttention):
+ """
+ Phi flash attention module. This module inherits from `PhiAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # PhiFlashAttention2 attention does not support output_attentions
+
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.qk_layernorm:
+ query_states = self.q_layernorm(query_states)
+ key_states = self.k_layernorm(key_states)
+
+ # Flash attention expects inputs of shape
+ # batch_size x seq_length x num_heads x head_dim;
+ # the states are first laid out as batch_size x num_heads x seq_length x head_dim for the rotary
+ # embedding and the cache update, then transposed back before the flash-attention call below.
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ # Partial rotary embedding
+ query_rot, query_pass = (
+ query_states[..., : self.rotary_emb.dim],
+ query_states[..., self.rotary_emb.dim :],
+ )
+ key_rot, key_pass = (
+ key_states[..., : self.rotary_emb.dim],
+ key_states[..., self.rotary_emb.dim :],
+ )
+ # [batch_size, num_heads, seq_length, head_dim * config.partial_rotary_factor]
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
+
+ # [batch_size, num_heads, seq_length, head_dim]
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transposes are quite inefficient, but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ attn_dropout = self.attention_dropout if self.training else 0.0
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
+ # so the input hidden states may get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+ # to fp32.
+
+ if query_states.dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=attn_dropout, softmax_scale=None
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.dense(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ the input is first unpadded, then the attention scores are computed, and finally the output is re-padded.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class PhiSdpaAttention(PhiAttention):
+ """
+ SDPA attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `PhiAttention`, as the weights of the module stay untouched. The only changes are on the forward pass, to adapt
+ to the SDPA API.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0")
+
+ # Adapted from PhiAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "PhiModel is using PhiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not "
+ "support `output_attentions=True`. Falling back to the manual attention implementation, but specifying "
+ "the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can "
+ 'be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ if self.qk_layernorm:
+ query_states = self.q_layernorm(query_states)
+ key_states = self.k_layernorm(key_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ # Partial rotary embedding
+ query_rot, query_pass = (
+ query_states[..., : self.rotary_emb.dim],
+ query_states[..., self.rotary_emb.dim :],
+ )
+ key_rot, key_pass = (
+ key_states[..., : self.rotary_emb.dim],
+ key_states[..., self.rotary_emb.dim :],
+ )
+ # [batch_size, num_heads, seq_length, head_dim * config.partial_rotary_factor]
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
+
+ # [batch_size, num_heads, seq_length, head_dim]
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
+ # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
+ # Reference: https://github.com/pytorch/pytorch/issues/112577
+ if self.require_contiguous_qkv and query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.dense(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+PHI_ATTENTION_CLASSES = {
+ "eager": PhiAttention,
+ "flash_attention_2": PhiFlashAttention2,
+ "sdpa": PhiSdpaAttention,
+}
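+
+ # Editorial note: the key used here is `config._attn_implementation`, which `from_pretrained` resolves
+ # (e.g. to "sdpa" when available, or to whatever is passed via `attn_implementation=...`), so loading
+ # with `attn_implementation="flash_attention_2"` makes every decoder layer below use PhiFlashAttention2.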
+
+
+class PhiDecoderLayer(nn.Module):
+ def __init__(self, config: PhiConfig, layer_idx: int):
+ super().__init__()
+ self.self_attn = PHI_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
+ self.mlp = PhiMLP(config)
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ attn_outputs, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ attn_outputs = self.resid_dropout(attn_outputs)
+
+ feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))
+ hidden_states = attn_outputs + feed_forward_hidden_states + residual
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
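+
+ # Editorial sketch of the layer above (parallel residual, GPT-J/Phi style):
+ #
+ #     h_norm = LayerNorm(h)
+ #     h_out  = h + Dropout(Attention(h_norm)) + Dropout(MLP(h_norm))
+ #
+ # i.e. attention and MLP both read the same normalized input and their outputs are summed with the
+ # residual, rather than being applied sequentially as in Llama-style blocks.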
+
+
+PHI_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`PhiConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Phi Model outputting raw hidden-states without any specific head on top.",
+ PHI_START_DOCSTRING,
+)
+class PhiPreTrainedModel(PreTrainedModel):
+ config_class = PhiConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["PhiDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+PHI_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Phi Model outputting raw hidden-states without any specific head on top.",
+ PHI_START_DOCSTRING,
+)
+class PhiModel(PhiPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PhiDecoderLayer`]
+
+ Args:
+ config: PhiConfig
+ """
+
+ def __init__(self, config: PhiConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.embed_dropout = nn.Dropout(config.embd_pdrop)
+ self.layers = nn.ModuleList(
+ [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self._use_sdpa = config._attn_implementation == "sdpa"
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape[:2]
+ elif inputs_embeds is not None:
+ batch_size, seq_length = inputs_embeds.shape[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ past_key_values_length = 0
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ inputs_embeds = self.embed_dropout(inputs_embeds)
+
+ # Attention mask.
+ if self._use_flash_attention_2:
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self._use_sdpa and not output_attentions:
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.final_layernorm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+class PhiForCausalLM(PhiPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = PhiModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PhiForCausalLM
+
+ >>> model = PhiForCausalLM.from_pretrained("microsoft/phi-1")
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1")
+
+ >>> prompt = "This is an example script ."
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ 'This is an example script .\n\n\n\nfrom typing import List\n\ndef find_most_common_letter(words: List[str'
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
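+ # Editorial example of the shift above: for labels [t0, t1, t2, t3], the logits at positions 0..2
+ # are scored against targets t1..t3, so every position is trained to predict the *next* token and
+ # the final position contributes no loss term.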
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
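+
+ # Editorial example of the slicing above: with 10 tokens already in the cache and input_ids of
+ # length 11 (the prompt plus one freshly sampled token), only input_ids[:, 10:] is kept, and
+ # position_ids is rebuilt from the attention mask so the new token is assigned position 10.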
+
+ @staticmethod
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The PhiModel with a sequence classification head on top (linear layer).
+
+ [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ PHI_START_DOCSTRING,
+)
+# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PHI,Llama->Phi with self.transformer->self.model, transformer_outputs->model_outputs
+class PhiForSequenceClassification(PhiPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = PhiModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ model_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = model_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
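+ # Editorial example of the pooling above: with pad_token_id = 0 and input_ids = [[5, 7, 9, 0, 0]],
+ # the first pad sits at index 3, so sequence_lengths = 2 and the logits of the last real token
+ # (index 2) are used; for a row with no padding, argmax returns 0, 0 - 1 = -1, and the modulo
+ # maps it back to the last index.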
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + model_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=model_outputs.past_key_values,
+ hidden_states=model_outputs.hidden_states,
+ attentions=model_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ PhiModel with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ PHI_START_DOCSTRING,
+)
+# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with MPT->PHI,Mpt->Phi,self.transformer->self.model,transformer_outputs->model_outputs
+class PhiForTokenClassification(PhiPreTrainedModel):
+ def __init__(self, config: PhiConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.model = PhiModel(config)
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
+ classifier_dropout = config.classifier_dropout
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
+ classifier_dropout = config.hidden_dropout
+ else:
+ classifier_dropout = 0.1
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **deprecated_arguments,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ model_outputs = self.model(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = model_outputs[0]
+ hidden_states = self.dropout(hidden_states)
+ logits = self.classifier(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ batch_size, seq_length = labels.shape
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
+ )
+
+ if not return_dict:
+ output = (logits,) + model_outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=model_outputs.hidden_states,
+ attentions=model_outputs.attentions,
+ )
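+
+
+ # Illustrative usage sketch (not part of the library). The checkpoint name and label count below
+ # are placeholders/assumptions for demonstration only:
+ #
+ # from transformers import AutoTokenizer, PhiForTokenClassification
+ #
+ # tokenizer = AutoTokenizer.from_pretrained("<phi-checkpoint>")  # hypothetical checkpoint id
+ # model = PhiForTokenClassification.from_pretrained("<phi-checkpoint>", num_labels=5)
+ # inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
+ # logits = model(**inputs).logits  # shape: (batch_size, sequence_length, num_labels)
+ # predictions = logits.argmax(dim=-1)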
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/plbart/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ade03d8aa5cdf8e1634d14d261de1cade1abb58c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/plbart/__init__.py
@@ -0,0 +1,81 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_plbart"] = [
+ "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "PLBartForCausalLM",
+ "PLBartForConditionalGeneration",
+ "PLBartForSequenceClassification",
+ "PLBartModel",
+ "PLBartPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_plbart import PLBartTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_plbart import (
+ PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
+ PLBartForCausalLM,
+ PLBartForConditionalGeneration,
+ PLBartForSequenceClassification,
+ PLBartModel,
+ PLBartPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
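+
+ # Illustrative note (an assumption about typical usage, not part of the original file): with the
+ # lazy-module setup above, importing the package is cheap and the torch-backed classes are only
+ # loaded on first attribute access, e.g.:
+ #
+ # from transformers.models import plbart
+ # model_cls = plbart.PLBartForConditionalGeneration  # modeling_plbart is imported here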
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70a24e5f7962c2ea4cd44a29c4503b272a04c904
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45149a24c30cc9ff6968c774388d29102a4f9c43
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/configuration_plbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/convert_plbart_original_checkpoint_to_torch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/convert_plbart_original_checkpoint_to_torch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86af4fd305d54237e7226b753ebe521539a8689a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/convert_plbart_original_checkpoint_to_torch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4a12d74d88985b8646e7f70714bdfc8dc0384d9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/modeling_plbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb192820824cfaa27b1250d070868918845f3b0f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/plbart/__pycache__/tokenization_plbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/configuration_plbart.py b/venv/lib/python3.10/site-packages/transformers/models/plbart/configuration_plbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..555a2fcc7572fff910e5d4f4eb1ef119fde33675
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/plbart/configuration_plbart.py
@@ -0,0 +1,192 @@
+# coding=utf-8
+# Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PLBART model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfigWithPast
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class PLBartConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate an
+ PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the PLBART
+ [uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50005):
+ Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`PLBartModel`].
+ d_model (`int`, *optional*, defaults to 768):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 6):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the classifier.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by dividing by sqrt(d_model).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Example:
+
+ ```python
+ >>> from transformers import PLBartConfig, PLBartModel
+
+ >>> # Initializing a PLBART uclanlp/plbart-base style configuration
+ >>> configuration = PLBartConfig()
+
+ >>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration
+ >>> model = PLBartModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "plbart"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=50005,
+ max_position_embeddings=1024,
+ encoder_layers=6,
+ encoder_ffn_dim=3072,
+ encoder_attention_heads=12,
+ decoder_layers=6,
+ decoder_ffn_dim=3072,
+ decoder_attention_heads=12,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=768,
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.0,
+ init_std=0.02,
+ classifier_dropout=0.0,
+ scale_embedding=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ forced_eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.classifier_dropout = classifier_dropout
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
+
+
+class PLBartOnnxConfig(OnnxConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ("attention_mask", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.use_past:
+ return OrderedDict(
+ [
+ ("last_hidden_state", {0: "batch", 1: "sequence"}),
+ ("past_keys", {0: "batch", 2: "sequence"}),
+ ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
+ ]
+ )
+ else:
+ return OrderedDict(
+ [
+ ("last_hidden_state", {0: "batch", 1: "sequence"}),
+ ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}),
+ ]
+ )
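+
+
+ # Minimal sketch (illustrative, not part of the original file) of how the dynamic axes declared
+ # above can be inspected; `use_past` defaults to False, so `outputs` takes the second branch:
+ #
+ # config = PLBartConfig()
+ # onnx_config = PLBartOnnxConfig(config)
+ # print(onnx_config.inputs)   # input_ids / attention_mask with dynamic batch and sequence axes
+ # print(onnx_config.outputs)  # last_hidden_state and encoder_last_hidden_state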
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py b/venv/lib/python3.10/site-packages/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..eac4a27d11c5a08386e698c35b89ac3f6ac3c98c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py
@@ -0,0 +1,94 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+from torch import nn
+
+from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartForSequenceClassification
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "encoder.version",
+ "decoder.version",
+ "model.encoder.version",
+ "model.decoder.version",
+ "_float_tensor",
+ "decoder.output_projection.weight",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+def convert_fairseq_plbart_checkpoint_from_disk(
+ checkpoint_path, hf_config_path="uclanlp/plbart-base", finetuned=False, classification=False
+):
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+ remove_ignore_keys_(state_dict)
+ vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
+
+ plbart_config = PLBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
+
+ state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
+ if not classification:
+ model = PLBartForConditionalGeneration(plbart_config)
+ model.model.load_state_dict(state_dict)
+ if finetuned:
+ model.lm_head = make_linear_from_emb(model.model.shared)
+
+ else:
+ classification_head = {}
+ for key, value in state_dict.copy().items():
+ if key.startswith("classification_heads.sentence_classification_head"):
+ classification_head[key.replace("classification_heads.sentence_classification_head.", "")] = value
+ state_dict.pop(key)
+ model = PLBartForSequenceClassification(plbart_config)
+ model.model.load_state_dict(state_dict)
+ model.classification_head.load_state_dict(classification_head)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("fairseq_path", type=str, help="model.pt on local filesystem.")
+ parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument(
+ "--hf_config",
+ default="uclanlp/plbart-base",
+ type=str,
+ help="Which huggingface architecture to use: plbart-base",
+ )
+ parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
+ parser.add_argument(
+ "--classification", action="store_true", help="whether the model is a classification checkpoint"
+ )
+ args = parser.parse_args()
+ model = convert_fairseq_plbart_checkpoint_from_disk(
+ args.fairseq_path,
+ hf_config_path=args.hf_config,
+ finetuned=args.finetuned,
+ classification=args.classification,
+ )
+ model.save_pretrained(args.pytorch_dump_folder_path)
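+
+
+ # Example invocation (illustrative; the paths below are placeholders):
+ #
+ # python convert_plbart_original_checkpoint_to_torch.py /path/to/fairseq/model.pt /path/to/output_dir \
+ # --hf_config uclanlp/plbart-base --finetuned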
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/modeling_plbart.py b/venv/lib/python3.10/site-packages/transformers/models/plbart/modeling_plbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..d60b7ee4b046ee431bf1c29186f56e7384465ab0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/plbart/modeling_plbart.py
@@ -0,0 +1,1765 @@
+# coding=utf-8
+# Copyright 2022, UCLA NLP, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch PLBART model."""
+import copy
+import math
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import (
+ _prepare_4d_attention_mask,
+ _prepare_4d_attention_mask_for_sdpa,
+ _prepare_4d_causal_attention_mask,
+ _prepare_4d_causal_attention_mask_for_sdpa,
+)
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Seq2SeqSequenceClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_plbart import PLBartConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "uclanlp/plbart-base"
+_CONFIG_FOR_DOC = "PLBartConfig"
+
+
+from ..deprecated._archive_maps import PLBART_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.mbart.modeling_mbart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
+ """
+ Shift input ids one token to the right, and wrap the last non-pad token (the `<LID>` token). Note that MBart does
+ not have a single `decoder_start_token_id` in contrast to other Bart-like models.
+ """
+ prev_output_tokens = input_ids.clone()
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
+
+ index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
+ decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
+ prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
+ prev_output_tokens[:, 0] = decoder_start_tokens
+
+ return prev_output_tokens
+
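+ # Worked example (illustrative, not part of the original code): with pad_token_id = 1 and
+ # input_ids = [[64, 127, 2, 50003]] (tokens, eos, language id), the last non-pad token (50003) is
+ # moved to the front, producing [[50003, 64, 127, 2]] as the decoder input.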
+
+# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->PLBart
+class PLBartLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ # PLBart is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim)
+
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ """`input_ids' shape is expected to be [bsz x seqlen]."""
+
+ bsz, seq_len = input_ids.shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ ).expand(bsz, -1)
+
+ return super().forward(positions + self.offset)
+
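+ # Illustrative note (added comment): because of the offset of 2, a length-4 sequence with no cache
+ # looks up embedding rows [2, 3, 4, 5], which is why the table is allocated with
+ # `num_embeddings + self.offset` rows in `__init__`.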
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PLBart
+class PLBartAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[PLBartConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
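+ # Illustrative note (added comment): in the decoder, the cached `past_key_value` tensors have shape
+ # (batch, num_heads, cached_length, head_dim); at generation time only the new token's key/value
+ # projections are computed and concatenated, so `src_len` grows while `tgt_len` is typically 1.
+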
+
+# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->PLBart, BART->PLBART
+class PLBartEncoderLayer(nn.Module):
+ def __init__(self, config: PLBartConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# TODO: Implement attention with SDPA for PLBart.
+PLBART_ATTENTION_CLASSES = {"eager": PLBartAttention}
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->PLBart, BART->PLBART
+class PLBartDecoderLayer(nn.Module):
+ def __init__(self, config: PLBartConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = PLBART_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
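+ # Illustrative note (added comment): with `output_attentions=True` and `use_cache=True` the layer
+ # returns (hidden_states, self_attn_weights, cross_attn_weights, present_key_value), where
+ # `present_key_value` packs the self-attention key/values first and the cross-attention key/values
+ # last, matching the `past_key_value[:2]` / `past_key_value[-2:]` split used above.
+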
+
+# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->PLBart
+class PLBartClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(
+ self,
+ input_dim: int,
+ inner_dim: int,
+ num_classes: int,
+ pooler_dropout: float,
+ ):
+ super().__init__()
+ self.dense = nn.Linear(input_dim, inner_dim)
+ self.dropout = nn.Dropout(p=pooler_dropout)
+ self.out_proj = nn.Linear(inner_dim, num_classes)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
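+ # Shape sketch (illustrative, not part of the original code): pooled sentence representations of
+ # shape (batch, d_model) are mapped to (batch, num_classes), e.g.
+ #
+ # head = PLBartClassificationHead(input_dim=768, inner_dim=768, num_classes=3, pooler_dropout=0.0)
+ # scores = head(torch.randn(2, 768))  # -> torch.Size([2, 3])
+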
+
+class PLBartPreTrainedModel(PreTrainedModel):
+ config_class = PLBartConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["PLBartDecoderLayer", "PLBartEncoderLayer"]
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+PLBART_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`PLBartConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PLBART_GENERATION_EXAMPLE = r"""
+ Mask-filling example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PLBartForConditionalGeneration
+
+ >>> model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-base")
+ >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
+
+ >>> # en_XX is the language symbol id for English
+ >>> TXT = " Is 0 the Fibonacci number ? en_XX"
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt").input_ids
+
+ >>> logits = model(input_ids).logits
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
+ >>> probs = logits[0, masked_index].softmax(dim=0)
+ >>> values, predictions = probs.topk(5)
+
+ >>> tokenizer.decode(predictions).split()
+ ['first', 'same', 'highest', 'result', 'number']
+ ```
+"""
+
+PLBART_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
+ See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint.
+ See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
+ varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bart.modeling_bart.BartEncoder with Bart->PLBart
+class PLBartEncoder(PLBartPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`PLBartEncoderLayer`].
+
+ Args:
+ config: PLBartConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = PLBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ embed_dim,
+ )
+ self.layers = nn.ModuleList([PLBartEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self._use_sdpa = config._attn_implementation == "sdpa"
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_ids = input_ids.view(-1, input_ids.shape[-1])
+ elif inputs_embeds is not None:
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input)
+ embed_pos = embed_pos.to(inputs_embeds.device)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.layernorm_embedding(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ if self._use_flash_attention_2:
+ attention_mask = attention_mask if 0 in attention_mask else None
+ elif self._use_sdpa and head_mask is None and not output_attentions:
+ # output_attentions=True & head_mask can not be supported when using SDPA, fall back to
+ # the manual implementation that requires a 4D causal mask in all cases.
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
+ else:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoder with Bart->PLBart
+class PLBartDecoder(PLBartPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`]
+
+ Args:
+ config: PLBartConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = PLBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ )
+ self.layers = nn.ModuleList([PLBartDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self._use_sdpa = config._attn_implementation == "sdpa"
+
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.shape
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input) * self.embed_scale
+
+ if self._use_flash_attention_2:
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self._use_sdpa and not output_attentions and cross_attn_head_mask is None:
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ input_shape,
+ inputs_embeds,
+ past_key_values_length,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ if self._use_flash_attention_2:
+ encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
+ elif self._use_sdpa and cross_attn_head_mask is None and not output_attentions:
+ # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
+ # the manual implementation that requires a 4D causal mask in all cases.
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
+ encoder_attention_mask,
+ inputs_embeds.dtype,
+ tgt_len=input_shape[-1],
+ )
+ else:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input, past_key_values_length)
+ positions = positions.to(inputs_embeds.device)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.layernorm_embedding(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
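+
+
+ # Editor's note: a minimal sketch, not part of the library, illustrating the `past_key_values`
+ # caching described in the decoder docstring above. It assumes the public "uclanlp/plbart-base"
+ # checkpoint; with a cache of the earlier tokens, only the last token needs to be fed and the
+ # final hidden state matches a full forward pass.
+ def _sketch_decoder_kv_cache():
+     import torch
+     from transformers import AutoTokenizer, PLBartModel
+
+     tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
+     decoder = PLBartModel.from_pretrained("uclanlp/plbart-base").eval().get_decoder()
+
+     ids = tokenizer("def add(a, b): return a + b", return_tensors="pt").input_ids
+     with torch.no_grad():
+         # process everything except the last token once, keeping the key/value cache
+         cached = decoder(input_ids=ids[:, :-1], use_cache=True)
+         # feed only the last token together with the cache
+         step = decoder(input_ids=ids[:, -1:], past_key_values=cached.past_key_values, use_cache=True)
+         # reference: a single pass over the whole sequence
+         full = decoder(input_ids=ids)
+     return torch.allclose(step.last_hidden_state, full.last_hidden_state[:, -1:], atol=1e-4)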
+
+
+@add_start_docstrings(
+ "The bare PLBART Model outputting raw hidden-states without any specific head on top.",
+ PLBART_START_DOCSTRING,
+)
+class PLBartModel(PLBartPreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: PLBartConfig):
+ super().__init__(config)
+
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+ self.encoder = PLBartEncoder(config, self.shared)
+ self.decoder = PLBartDecoder(config, self.shared)
+
+ self.init_weights()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.LongTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # different to other models, PLBart automatically creates decoder_input_ids from
+ # input_ids if no decoder_input_ids are provided
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
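+
+
+ # Editor's note: a minimal, illustrative sketch (not part of the library) of the bare PLBartModel.
+ # As the comment in `forward` notes, PLBart derives `decoder_input_ids` from `input_ids`
+ # automatically when none are provided. The checkpoint name is the public "uclanlp/plbart-base" model.
+ def _sketch_plbart_model():
+     import torch
+     from transformers import AutoTokenizer, PLBartModel
+
+     tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
+     model = PLBartModel.from_pretrained("uclanlp/plbart-base").eval()
+
+     inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
+     with torch.no_grad():
+         # no decoder_input_ids are passed; they are created by shifting input_ids to the right
+         outputs = model(**inputs)
+     return outputs.last_hidden_state.shape  # (batch_size, sequence_length, d_model)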
+
+
+@add_start_docstrings(
+ "The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.",
+ PLBART_START_DOCSTRING,
+)
+class PLBartForConditionalGeneration(PLBartPreTrainedModel):
+ base_model_prefix = "model"
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: PLBartConfig):
+ super().__init__(config)
+ self.model = PLBartModel(config)
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
+
+ self.init_weights()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
+ return new_embeddings
+
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
+ old_num_tokens = self.final_logits_bias.shape[-1]
+ if new_num_tokens <= old_num_tokens:
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
+ else:
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
+ self.register_buffer("final_logits_bias", new_bias)
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(PLBART_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.LongTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+ lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids: torch.LongTensor,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ **kwargs,  # not used by this method; accepted so `generate` can pass additional model kwargs
+ ) -> Dict[str, Any]:
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id)
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
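+
+
+ # Editor's note: an illustrative generation sketch, not part of the library. It assumes the public
+ # "uclanlp/plbart-python-en_XX" code-summarization checkpoint; the tokenizer appends the source
+ # language code to the input, and the target language code is used as the decoder start token.
+ def _sketch_plbart_code_to_text():
+     from transformers import PLBartForConditionalGeneration, PLBartTokenizer
+
+     tokenizer = PLBartTokenizer.from_pretrained(
+         "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
+     )
+     model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-python-en_XX")
+
+     code = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
+     inputs = tokenizer(code, return_tensors="pt")
+     generated = model.generate(
+         **inputs, decoder_start_token_id=tokenizer.lang_code_to_id["__en_XX__"], max_length=32
+     )
+     return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]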
+
+
+@add_start_docstrings(
+ """
+ PLBart model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for code
+ classification.
+ """,
+ PLBART_START_DOCSTRING,
+)
+class PLBartForSequenceClassification(PLBartPreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: PLBartConfig, **kwargs):
+ super().__init__(config, **kwargs)
+ self.model = PLBartModel(config)
+ self.classification_head = PLBartClassificationHead(
+ config.d_model,
+ config.d_model,
+ config.num_labels,
+ config.classifier_dropout,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ if input_ids is None and inputs_embeds is not None:
+ raise NotImplementedError(
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0] # last hidden state
+
+ eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
+
+ if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
+ raise ValueError("All examples must have the same number of tokens.")
+ sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
+ :, -1, :
+ ]
+ logits = self.classification_head(sentence_representation)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.config.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.config.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
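+
+
+ # Editor's note: an illustrative sketch, not part of the library. The base checkpoint ships no
+ # fine-tuned classification head, so the head below is randomly initialized; the point is only to
+ # show the expected inputs and output shape, not to produce meaningful labels.
+ def _sketch_plbart_sequence_classification():
+     import torch
+     from transformers import AutoTokenizer, PLBartForSequenceClassification
+
+     tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
+     model = PLBartForSequenceClassification.from_pretrained("uclanlp/plbart-base", num_labels=2).eval()
+
+     inputs = tokenizer("def add(a, b): return a + b", return_tensors="pt")
+     with torch.no_grad():
+         logits = model(**inputs).logits  # (batch_size, num_labels)
+     return logits.argmax(dim=-1)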
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PLBart
+class PLBartDecoderWrapper(PLBartPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = PLBartDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->PLBart, facebook/bart-base->uclanlp/plbart-base
+class PLBartForCausalLM(PLBartPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.model = PLBartDecoderWrapper(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PLBartForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-base")
+ >>> model = PLBartForCausalLM.from_pretrained("uclanlp/plbart-base", add_cross_attention=False)
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
+ >>> list(logits.shape) == expected_shape
+ True
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ # first step, decoder_cached_states are empty
+ return {
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
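+
+
+ # Editor's note: an illustrative sketch, not part of the library, of the trimming performed in
+ # `prepare_inputs_for_generation` above: once a key/value cache of length `past_length` exists,
+ # only the not-yet-processed suffix of `input_ids` is forwarded to the model.
+ def _sketch_prepare_inputs_trimming():
+     import torch
+
+     input_ids = torch.tensor([[11, 12, 13, 14, 15]])
+     past_length = 4  # four tokens are already present in the cache
+
+     if input_ids.shape[1] > past_length:
+         remove_prefix_length = past_length
+     else:
+         # generation loops that already pass a single token keep only the final ID
+         remove_prefix_length = input_ids.shape[1] - 1
+
+     return input_ids[:, remove_prefix_length:]  # tensor([[15]])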
diff --git a/venv/lib/python3.10/site-packages/transformers/models/plbart/tokenization_plbart.py b/venv/lib/python3.10/site-packages/transformers/models/plbart/tokenization_plbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ab2e33f7f0dba9397e4c3f44a2fb3c187762b36
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/plbart/tokenization_plbart.py
@@ -0,0 +1,425 @@
+# coding=utf-8
+# Copyright 2022, UCLA NLP, The Facebook AI Research Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+
+FAIRSEQ_LANGUAGE_CODES = {
+ "base": ["__java__", "__python__", "__en_XX__"],
+ "multi": ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
+}
+
+FAIRSEQ_LANGUAGE_CODES_MAP = {
+ "java": "__java__",
+ "python": "__python__",
+ "en_XX": "__en_XX__",
+ "javascript": "__javascript__",
+ "php": "__php__",
+ "ruby": "__ruby__",
+ "go": "__go__",
+}
+
+
+class PLBartTokenizer(PreTrainedTokenizer):
+ """
+ Construct a PLBART tokenizer.
+
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
+ <tokens> <eos>` for target language documents.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ src_lang (`str`, *optional*):
+ A string representing the source language.
+ tgt_lang (`str`, *optional*):
+ A string representing the target language.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The start of sequence token.
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The cls token, which is a special token used as the first token for all tasks.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masking tasks. This
+ is only used in the `"base"` tokenizer type. For `"multi"` tokenizer, masking is never done for the
+ downstream tasks.
+ language_codes (`str`, *optional*, defaults to `"base"`):
+ What language codes to use. Should be one of `"base"` or `"multi"`.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Examples:
+
+ ```python
+ >>> from transformers import PLBartTokenizer
+
+ >>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX")
+ >>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
+ >>> expected_translation_english = "Returns the maximum value of a b c."
+ >>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
+ ```"""
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ prefix_tokens: List[int] = []
+ suffix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ language_codes="base",
+ tokenizer_file=None,
+ src_lang=None,
+ tgt_lang=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ additional_special_tokens=None,
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ src_lang = self._convert_lang_code_special_format(src_lang)
+ tgt_lang = self._convert_lang_code_special_format(tgt_lang)
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+ self.language_codes = language_codes
+
+ fairseq_language_codes = FAIRSEQ_LANGUAGE_CODES[self.language_codes]
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
+
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
+
+ self.sp_model_size = len(self.sp_model)
+ self.lang_code_to_id = {
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(fairseq_language_codes)
+ }
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
+
+ if self.language_codes == "base":
+ self.fairseq_tokens_to_ids[""] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
+
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+ _additional_special_tokens = list(self.lang_code_to_id.keys())
+
+ if additional_special_tokens is not None:
+ # Only add those special tokens if they are not already there.
+ _additional_special_tokens.extend(
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
+ )
+
+ if self.language_codes == "base":
+ self._src_lang = src_lang
+ self.cur_lang_code_id = (
+ self.lang_code_to_id[self._src_lang] if self._src_lang is not None else self._src_lang
+ )
+ else:
+ self._src_lang = src_lang if src_lang is not None else "__en_XX__"
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ language_codes=language_codes,
+ tokenizer_file=tokenizer_file,
+ src_lang=src_lang,
+ tgt_lang=tgt_lang,
+ additional_special_tokens=_additional_special_tokens,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ self.tgt_lang = tgt_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ if self.language_codes == "base":
+ return (
+ len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1
+ ) # Plus 1 for the mask token
+ else:
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
+
+ @property
+ def src_lang(self) -> str:
+ return self._src_lang
+
+ @src_lang.setter
+ def src_lang(self, new_src_lang: str) -> None:
+ new_src_lang = self._convert_lang_code_special_format(new_src_lang)
+ self._src_lang = new_src_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ prefix_ones = [1] * len(self.prefix_tokens)
+ suffix_ones = [1] * len(self.suffix_tokens)
+ if token_ids_1 is None:
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A PLBART sequence has the following format, where `X` represents the sequence:
+
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
+
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
+ separator.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def _build_translation_inputs(
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
+ ):
+ """Used by translation pipeline, to prepare inputs for the generate function"""
+ if src_lang is None or tgt_lang is None:
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
+ self.src_lang = self._convert_lang_code_special_format(src_lang)
+ self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
+ tgt_lang_id = self.convert_tokens_to_ids(self.tgt_lang)
+ inputs["forced_bos_token_id"] = tgt_lang_id
+ return inputs
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def prepare_seq2seq_batch(
+ self,
+ src_texts: List[str],
+ src_lang: str = "en_XX",
+ tgt_texts: Optional[List[str]] = None,
+ tgt_lang: str = "python",
+ **kwargs,
+ ) -> BatchEncoding:
+ self.src_lang = self._convert_lang_code_special_format(src_lang)
+ self.tgt_lang = self._convert_lang_code_special_format(tgt_lang)
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
+
+ def _switch_to_input_mode(self):
+ return self.set_src_lang_special_tokens(self.src_lang)
+
+ def _switch_to_target_mode(self):
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
+
+ def set_src_lang_special_tokens(self, src_lang) -> None:
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
+ src_lang = self._convert_lang_code_special_format(src_lang)
+ self.cur_lang_code = self.lang_code_to_id[src_lang] if src_lang is not None else None
+ self.prefix_tokens = []
+ if self.cur_lang_code is not None:
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
+ else:
+ self.suffix_tokens = [self.eos_token_id]
+
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
+ lang = self._convert_lang_code_special_format(lang)
+
+ self.cur_lang_code = self.lang_code_to_id[lang] if lang is not None else None
+ self.prefix_tokens = []
+ if self.cur_lang_code is not None:
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
+ else:
+ self.suffix_tokens = [self.eos_token_id]
+
+ def _convert_lang_code_special_format(self, lang: str) -> str:
+ """Convert Language Codes to format tokenizer uses if required"""
+ lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang
+ return lang
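+
+
+ # Editor's note: an illustrative sketch, not part of the library, of the special-token layout
+ # described in `build_inputs_with_special_tokens`: PLBart adds no prefix and suffixes every source
+ # sequence with `[eos, src_lang_code]`. It assumes the public "uclanlp/plbart-python-en_XX" files.
+ def _sketch_plbart_special_tokens():
+     from transformers import PLBartTokenizer
+
+     tokenizer = PLBartTokenizer.from_pretrained(
+         "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
+     )
+     ids = tokenizer("def add(a, b): return a + b").input_ids
+     # the last two ids are </s> followed by the __python__ language code
+     return ids[-2:] == [tokenizer.eos_token_id, tokenizer.lang_code_to_id["__python__"]]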
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c09b683a3462564069a62157cd92fa674ae4ccd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_time_series_transformer": [
+ "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TimeSeriesTransformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_time_series_transformer"] = [
+ "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TimeSeriesTransformerForPrediction",
+ "TimeSeriesTransformerModel",
+ "TimeSeriesTransformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_time_series_transformer import (
+ TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TimeSeriesTransformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_time_series_transformer import (
+ TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TimeSeriesTransformerForPrediction,
+ TimeSeriesTransformerModel,
+ TimeSeriesTransformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
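+
+
+ # Editor's note: an illustrative sketch, not part of the library. Because of the `_LazyModule`
+ # indirection above, importing the package is cheap; the torch-backed modeling module is only
+ # imported when one of its attributes is first accessed.
+ def _sketch_lazy_import():
+     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
+
+     # accessing the name above triggered the real import of the configuration module
+     config = TimeSeriesTransformerConfig(prediction_length=24)
+     return config.prediction_length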
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0652d2ffb50d97ff0f57cafde52785984b8e7c44
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fa92940e4af43b0e6b63f34bef4a1930ce2fb34
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/configuration_time_series_transformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd0fc39b323b0228ec703462cbf5c6983c001e97
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/__pycache__/modeling_time_series_transformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f53f3aad1ec9473f55848df8d7ff1357f704c921
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/configuration_time_series_transformer.py
@@ -0,0 +1,229 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Time Series Transformer model configuration"""
+
+from typing import List, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TimeSeriesTransformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TimeSeriesTransformerModel`]. It is used to
+ instantiate a Time Series Transformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Time Series
+ Transformer
+ [huggingface/time-series-transformer-tourism-monthly](https://huggingface.co/huggingface/time-series-transformer-tourism-monthly)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ prediction_length (`int`):
+ The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
+ typically dictated by the dataset, and we recommend setting it appropriately.
+ context_length (`int`, *optional*, defaults to `prediction_length`):
+ The context length for the encoder. If `None`, the context length will be the same as the
+ `prediction_length`.
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
+ loss (`string`, *optional*, defaults to `"nll"`):
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
+ distributions, this is the negative log likelihood (nll), which is currently the only supported loss.
+ input_size (`int`, *optional*, defaults to 1):
+ The size of the target variable, which is 1 by default for univariate targets and > 1 for multivariate
+ targets.
+ scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
+ Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
+ scaler is set to "mean".
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+ The lags of the input time series, used as covariates and often dictated by the frequency of the data.
+ Default is `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it appropriately based on the dataset.
+ num_time_features (`int`, *optional*, defaults to 0):
+ The number of time features in the input time series.
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
+ The number of dynamic real valued features.
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
+ The number of static categorical features.
+ num_static_real_features (`int`, *optional*, defaults to 0):
+ The number of static real valued features.
+ cardinality (`list[int]`, *optional*):
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ embedding_dimension (`list[int]`, *optional*):
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ d_model (`int`, *optional*, defaults to 64):
+ Dimensionality of the transformer layers.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 2):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
+ Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+ `"relu"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the encoder and decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The LayerDrop probability for the encoder, i.e. the probability of skipping each encoder layer during
+ training (see https://arxiv.org/abs/1909.11556 for details).
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The LayerDrop probability for the decoder, i.e. the probability of skipping each decoder layer during
+ training (see https://arxiv.org/abs/1909.11556 for details).
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability used between the two layers of the feed-forward networks.
+ num_parallel_samples (`int`, *optional*, defaults to 100):
+ The number of samples to generate in parallel for each time step of inference.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated normal weight initialization distribution.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+
+ Example:
+
+ ```python
+ >>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel
+
+ >>> # Initializing a Time Series Transformer configuration with 12 time steps for prediction
+ >>> configuration = TimeSeriesTransformerConfig(prediction_length=12)
+
+ >>> # Randomly initializing a model (with random weights) from the configuration
+ >>> model = TimeSeriesTransformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "time_series_transformer"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ "num_hidden_layers": "encoder_layers",
+ }
+
+ def __init__(
+ self,
+ prediction_length: Optional[int] = None,
+ context_length: Optional[int] = None,
+ distribution_output: str = "student_t",
+ loss: str = "nll",
+ input_size: int = 1,
+ lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
+ scaling: Optional[Union[str, bool]] = "mean",
+ num_dynamic_real_features: int = 0,
+ num_static_categorical_features: int = 0,
+ num_static_real_features: int = 0,
+ num_time_features: int = 0,
+ cardinality: Optional[List[int]] = None,
+ embedding_dimension: Optional[List[int]] = None,
+ encoder_ffn_dim: int = 32,
+ decoder_ffn_dim: int = 32,
+ encoder_attention_heads: int = 2,
+ decoder_attention_heads: int = 2,
+ encoder_layers: int = 2,
+ decoder_layers: int = 2,
+ is_encoder_decoder: bool = True,
+ activation_function: str = "gelu",
+ d_model: int = 64,
+ dropout: float = 0.1,
+ encoder_layerdrop: float = 0.1,
+ decoder_layerdrop: float = 0.1,
+ attention_dropout: float = 0.1,
+ activation_dropout: float = 0.1,
+ num_parallel_samples: int = 100,
+ init_std: float = 0.02,
+ use_cache=True,
+ **kwargs,
+ ):
+ # time series specific configuration
+ self.prediction_length = prediction_length
+ self.context_length = context_length or prediction_length
+ self.distribution_output = distribution_output
+ self.loss = loss
+ self.input_size = input_size
+ self.num_time_features = num_time_features
+ self.lags_sequence = lags_sequence
+ self.scaling = scaling
+ self.num_dynamic_real_features = num_dynamic_real_features
+ self.num_static_real_features = num_static_real_features
+ self.num_static_categorical_features = num_static_categorical_features
+ if cardinality and num_static_categorical_features > 0:
+ if len(cardinality) != num_static_categorical_features:
+ raise ValueError(
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.cardinality = cardinality
+ else:
+ self.cardinality = [0]
+ if embedding_dimension and num_static_categorical_features > 0:
+ if len(embedding_dimension) != num_static_categorical_features:
+ raise ValueError(
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.embedding_dimension = embedding_dimension
+ else:
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+ self.num_parallel_samples = num_parallel_samples
+
+ # Transformer architecture configuration
+ self.feature_size = input_size * len(lags_sequence) + self._number_of_features
+ self.d_model = d_model
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_attention_heads = decoder_attention_heads
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.decoder_layers = decoder_layers
+
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+
+ self.activation_function = activation_function
+ self.init_std = init_std
+
+ self.use_cache = use_cache
+
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def _number_of_features(self) -> int:
+ return (
+ sum(self.embedding_dimension)
+ + self.num_dynamic_real_features
+ + self.num_time_features
+ + self.num_static_real_features
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
+ )
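+
+ # A minimal sketch of how `feature_size` follows from the settings above; the concrete numbers assume a
+ # default configuration (`input_size=1`, the default 7-element `lags_sequence`, and no static, dynamic or
+ # time features):
+ #
+ # >>> config = TimeSeriesTransformerConfig(prediction_length=12)
+ # >>> config._number_of_features # sum(embedding_dimension) + 0 + 0 + 0 + 2 * input_size
+ # 2
+ # >>> config.feature_size # input_size * len(lags_sequence) + _number_of_features = 1 * 7 + 2
+ # 9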
diff --git a/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab46d3a92a185342670a99d3c9849e7363283542
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/time_series_transformer/modeling_time_series_transformer.py
@@ -0,0 +1,1784 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Time Series Transformer model."""
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ SampleTSPredictionOutput,
+ Seq2SeqTSModelOutput,
+ Seq2SeqTSPredictionOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_time_series_transformer import TimeSeriesTransformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TimeSeriesTransformerConfig"
+
+
+from ..deprecated._archive_maps import TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TimeSeriesFeatureEmbedder(nn.Module):
+ """
+ Embed a sequence of categorical features.
+
+ Args:
+ cardinalities (`list[int]`):
+ List of cardinalities of the categorical features.
+ embedding_dims (`list[int]`):
+ List of embedding dimensions of the categorical features.
+ """
+
+ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
+ super().__init__()
+
+ self.num_features = len(cardinalities)
+ self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ if self.num_features > 1:
+ # we slice the last dimension, giving an array of length
+ # self.num_features with shape (N,T) or (N)
+ cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
+ else:
+ cat_feature_slices = [features]
+
+ return torch.cat(
+ [
+ embed(cat_feature_slice.squeeze(-1))
+ for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
+ ],
+ dim=-1,
+ )
+
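+ # A minimal usage sketch of the embedder above; the cardinalities and embedding dimensions are toy values
+ # chosen only for illustration:
+ #
+ # >>> embedder = TimeSeriesFeatureEmbedder(cardinalities=[3, 10], embedding_dims=[2, 4])
+ # >>> cat_features = torch.zeros(8, 20, 2, dtype=torch.long) # (batch, time, num_categorical_features)
+ # >>> embedder(cat_features).shape # the per-feature embeddings are concatenated: 2 + 4
+ # torch.Size([8, 20, 6])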
+
+class TimeSeriesStdScaler(nn.Module):
+ """
+ Standardizes features along the first dimension by computing their mean and standard deviation, and then
+ normalizes the data by subtracting the mean and dividing by the standard deviation.
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ The input data on which the scaling statistics are computed.
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Boolean mask indicating which values in `data` were observed; the statistics are computed over observed
+ values only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
+ denominator = denominator.clamp_min(1.0)
+ loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
+
+ variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
+ scale = torch.sqrt(variance + self.minimum_scale)
+ return (data - loc) / scale, loc, scale
+
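+ # A minimal sketch of the standard scaler above; `observed_indicator` excludes missing/padded steps from the
+ # statistics, and the defaults reduce over the time dimension (dim=1) with `keepdim=True`:
+ #
+ # >>> scaler = TimeSeriesStdScaler(TimeSeriesTransformerConfig(prediction_length=12))
+ # >>> data = torch.randn(4, 24, 1) # (batch, time, input_size)
+ # >>> scaled, loc, scale = scaler(data, torch.ones_like(data))
+ # >>> scaled.shape, loc.shape, scale.shape
+ # (torch.Size([4, 24, 1]), torch.Size([4, 1, 1]), torch.Size([4, 1, 1]))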
+
+class TimeSeriesMeanScaler(nn.Module):
+ """
+ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
+ accordingly.
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
+ self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ The input data on which the scaling statistics are computed.
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ Boolean mask indicating which values in `data` were observed; the scale is computed over observed values
+ only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
+ num_observed = observed_indicator.sum(self.dim, keepdim=True)
+
+ scale = ts_sum / torch.clamp(num_observed, min=1)
+
+ # If `default_scale` is provided, we use it, otherwise we use the scale
+ # of the batch.
+ if self.default_scale is None:
+ batch_sum = ts_sum.sum(dim=0)
+ batch_observations = torch.clamp(num_observed.sum(0), min=1)
+ default_scale = torch.squeeze(batch_sum / batch_observations)
+ else:
+ default_scale = self.default_scale * torch.ones_like(scale)
+
+ # apply default scale where there are no observations
+ scale = torch.where(num_observed > 0, scale, default_scale)
+
+ # ensure the scale is at least `self.minimum_scale`
+ scale = torch.clamp(scale, min=self.minimum_scale)
+ scaled_data = data / scale
+
+ if not self.keepdim:
+ scale = scale.squeeze(dim=self.dim)
+
+ return scaled_data, torch.zeros_like(scale), scale
+
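+ # A minimal sketch of the mean scaler above: `loc` is always zero and `scale` is the mean absolute value over
+ # the observed steps, so a constant series of 4.0 is scaled to 1.0:
+ #
+ # >>> scaler = TimeSeriesMeanScaler(TimeSeriesTransformerConfig(prediction_length=12))
+ # >>> data = torch.full((2, 24, 1), 4.0)
+ # >>> scaled, loc, scale = scaler(data, torch.ones_like(data))
+ # >>> scale[0, 0, 0].item(), loc[0, 0, 0].item(), scaled[0, 0, 0].item()
+ # (4.0, 0.0, 1.0)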
+
+class TimeSeriesNOPScaler(nn.Module):
+ """
+ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor = None
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+ The input data, returned unchanged together with a zero `loc` and a unit `scale`.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ return data, loc, scale
+
+
+def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
+ """
+ Computes the negative log likelihood loss from input distribution with respect to target.
+ """
+ return -input.log_prob(target)
+
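+ # A minimal sketch of `nll` with a plain torch distribution (in the model the distribution comes from the
+ # `distribution_output` head instead):
+ #
+ # >>> dist = torch.distributions.Normal(loc=torch.zeros(3), scale=torch.ones(3))
+ # >>> nll(dist, torch.zeros(3)) # -log N(0 | 0, 1) = 0.5 * log(2 * pi)
+ # tensor([0.9189, 0.9189, 0.9189])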
+
+def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
+ """
+ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
+ meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
+
+ Args:
+ input_tensor (`torch.FloatTensor`):
+ Input tensor, of which the average must be computed.
+ weights (`torch.FloatTensor`, *optional*):
+ Weights tensor, of the same shape as `input_tensor`.
+ dim (`int`, *optional*):
+ The dim along which to average `input_tensor`.
+
+ Returns:
+ `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
+ """
+ if weights is not None:
+ weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
+ sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
+ return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
+ else:
+ return input_tensor.mean(dim=dim)
+
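+ # A minimal sketch of `weighted_average`: entries with zero weight are masked out, so a NaN at an unobserved
+ # position does not propagate into the average:
+ #
+ # >>> values = torch.tensor([[1.0, float("nan"), 3.0]])
+ # >>> weights = torch.tensor([[1.0, 0.0, 1.0]])
+ # >>> weighted_average(values, weights=weights, dim=1)
+ # tensor([2.])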
+
+# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->TimeSeries
+class TimeSeriesSinusoidalPositionalEmbedding(nn.Embedding):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+ super().__init__(num_positions, embedding_dim)
+ self.weight = self._init_weight(self.weight)
+
+ @staticmethod
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
+ """
+ Identical to the XLM create_sinusoidal_embeddings, except that features are not interleaved: the cos features
+ are in the second half of the vector (`[dim // 2:]`).
+ """
+ n_pos, dim = out.shape
+ position_enc = np.array(
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+ )
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+ return out
+
+ @torch.no_grad()
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
+
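+ # A minimal sketch of the positional embedding above: it is indexed by a *shape* rather than by token ids,
+ # and `past_key_values_length` offsets the positions during incremental decoding:
+ #
+ # >>> embed = TimeSeriesSinusoidalPositionalEmbedding(num_positions=24, embedding_dim=64)
+ # >>> embed(torch.Size([2, 5]), past_key_values_length=0).shape
+ # torch.Size([5, 64])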
+
+class TimeSeriesValueEmbedding(nn.Module):
+ def __init__(self, feature_size, d_model):
+ super().__init__()
+ self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
+
+ def forward(self, x):
+ return self.value_projection(x)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->TimeSeriesTransformer
+class TimeSeriesTransformerAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[TimeSeriesTransformerConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
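+ # A minimal sketch of the eager attention module above, showing the returned shapes; without `is_decoder`
+ # no key/value cache is returned:
+ #
+ # >>> attn = TimeSeriesTransformerAttention(embed_dim=64, num_heads=2)
+ # >>> hidden = torch.randn(2, 10, 64) # (batch, time, channels)
+ # >>> out, weights, past = attn(hidden, output_attentions=True)
+ # >>> out.shape, weights.shape, past
+ # (torch.Size([2, 10, 64]), torch.Size([2, 2, 10, 10]), None)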
+
+# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->TimeSeriesTransformer, BART->TIME_SERIES_TRANSFORMER
+class TimeSeriesTransformerEncoderLayer(nn.Module):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# TODO: Implement attention with SDPA for TimeSeriesTransformer.
+TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES = {
+ "eager": TimeSeriesTransformerAttention,
+}
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer, with BART->TIME_SERIES_TRANSFORMER
+class TimeSeriesTransformerDecoderLayer(nn.Module):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = TIME_SERIES_TRANSFORMER_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class TimeSeriesTransformerPreTrainedModel(PreTrainedModel):
+ config_class = TimeSeriesTransformerConfig
+ base_model_prefix = "model"
+ main_input_name = "past_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, TimeSeriesSinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+TIME_SERIES_TRANSFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TimeSeriesTransformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, which serve as context in order to predict the future. The sequence size of
+ this tensor must be larger than the `context_length` of the model, since the model will use the larger size
+ to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
+ context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if no
+ `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
+ the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
+ `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ If there are missing values, they need to be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step. Holiday features are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+ Transformer requires these additional time features to be provided. The Time Series Transformer only learns
+ additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+ must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to the
+ values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
+ Future values of the time series, that serve as labels for the model. The `future_values` is what the
+ Transformer needs during training to learn to output, given the `past_values`.
+
+ The sequence length here is equal to `prediction_length`.
+
+ See the demo notebook and code snippets for details.
+
+ During training, any missing values need to be replaced with zeros and indicated via the
+ `future_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
+ variates in the time series per time step.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to `future_values`.
+ These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
+ Fourier features). These could also be so-called "age" features, which basically help the model know "at
+ which point in life" a time-series is. Age features have small values for distant past time steps and
+ increase monotonically the more we approach the current time step. Holiday features are also a good example
+ of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+ the position encodings are learned from scratch internally as parameters of the model, the Time Series
+ Transformer requires these additional time features to be provided. The Time Series Transformer only learns
+ additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
+ must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ This mask is used to filter out missing values for the final loss calculation.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to
+ make sure the model can only look at previous inputs in order to predict the future.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TimeSeriesTransformerEncoderLayer`].
+
+ Args:
+ config: TimeSeriesTransformerConfig
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size())
+
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
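+ # A minimal sketch of running the encoder standalone; `inputs_embeds` must already carry the lagged values
+ # and features with `config.feature_size` channels (9 for a default config, see the configuration class):
+ #
+ # >>> config = TimeSeriesTransformerConfig(prediction_length=12)
+ # >>> encoder = TimeSeriesTransformerEncoder(config)
+ # >>> feats = torch.randn(1, config.context_length, config.feature_size)
+ # >>> encoder(inputs_embeds=feats).last_hidden_state.shape
+ # torch.Size([1, 12, 64])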
+
+class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
+ [`TimeSeriesTransformerDecoderLayer`]
+
+ Args:
+ config: TimeSeriesTransformerConfig
+ """
+
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding token indices of the encoder inputs. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = inputs_embeds.size()[:-1]
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Time Series Transformer Model outputting raw hidden-states without any specific head on top.",
+ TIME_SERIES_TRANSFORMER_START_DOCSTRING,
+)
+class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+
+ if config.scaling == "mean" or config.scaling is True:
+ self.scaler = TimeSeriesMeanScaler(config)
+ elif config.scaling == "std":
+ self.scaler = TimeSeriesStdScaler(config)
+ else:
+ self.scaler = TimeSeriesNOPScaler(config)
+
+ if config.num_static_categorical_features > 0:
+ self.embedder = TimeSeriesFeatureEmbedder(
+ cardinalities=config.cardinality,
+ embedding_dims=config.embedding_dimension,
+ )
+
+ # transformer encoder-decoder and mask initializer
+ self.encoder = TimeSeriesTransformerEncoder(config)
+ self.decoder = TimeSeriesTransformerDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def _past_length(self) -> int:
+ return self.config.context_length + max(self.config.lags_sequence)
+
+ def get_lagged_subsequences(
+ self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
+ ) -> torch.Tensor:
+ """
+ Returns lagged subsequences of a given sequence as a tensor of shape (N, S, C, I), where S =
+ subsequences_length and I = len(indices). Specifically, lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].
+
+ Args:
+ sequence: Tensor
+ The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
+ subsequences_length : int
+ Length of the subsequences to be extracted.
+ shift: int
+ Shift the lags by this amount back.
+ """
+ sequence_length = sequence.shape[1]
+ indices = [lag - shift for lag in self.config.lags_sequence]
+
+ if max(indices) + subsequences_length > sequence_length:
+ raise ValueError(
+ f"lags cannot go further than history length, found lag {max(indices)} "
+ f"while history length is only {sequence_length}"
+ )
+
+ lagged_values = []
+ for lag_index in indices:
+ begin_index = -lag_index - subsequences_length
+ end_index = -lag_index if lag_index > 0 else None
+ lagged_values.append(sequence[:, begin_index:end_index, ...])
+ return torch.stack(lagged_values, dim=-1)
+
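+ # Illustrative sketch (not part of the library): with a hypothetical config whose `lags_sequence`
+ # is [1, 2], a sequence of shape (N, T, C) = (1, 6, 1) holding the values 0..5, and
+ # `subsequences_length=3`, the method stacks the two shifted windows along a new trailing axis.
+ # Here `model` denotes a `TimeSeriesTransformerModel` built from that hypothetical config:
+ #
+ #     seq = torch.arange(6.0).reshape(1, 6, 1)
+ #     # lag 1 -> seq[:, -4:-1] -> values [2, 3, 4]
+ #     # lag 2 -> seq[:, -5:-2] -> values [1, 2, 3]
+ #     lagged = model.get_lagged_subsequences(seq, subsequences_length=3)
+ #     lagged.shape  # torch.Size([1, 3, 1, 2])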
+ def create_network_inputs(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ ):
+ # time feature
+ time_feat = (
+ torch.cat(
+ (
+ past_time_features[:, self._past_length - self.config.context_length :, ...],
+ future_time_features,
+ ),
+ dim=1,
+ )
+ if future_values is not None
+ else past_time_features[:, self._past_length - self.config.context_length :, ...]
+ )
+
+ # target
+ if past_observed_mask is None:
+ past_observed_mask = torch.ones_like(past_values)
+
+ context = past_values[:, -self.config.context_length :]
+ observed_context = past_observed_mask[:, -self.config.context_length :]
+ _, loc, scale = self.scaler(context, observed_context)
+
+ inputs = (
+ (torch.cat((past_values, future_values), dim=1) - loc) / scale
+ if future_values is not None
+ else (past_values - loc) / scale
+ )
+
+ # static features
+ log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
+ log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
+ static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
+
+ if static_real_features is not None:
+ static_feat = torch.cat((static_real_features, static_feat), dim=1)
+ if static_categorical_features is not None:
+ embedded_cat = self.embedder(static_categorical_features)
+ static_feat = torch.cat((embedded_cat, static_feat), dim=1)
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
+
+ # all features
+ features = torch.cat((expanded_static_feat, time_feat), dim=-1)
+
+ # lagged features
+ subsequences_length = (
+ self.config.context_length + self.config.prediction_length
+ if future_values is not None
+ else self.config.context_length
+ )
+ lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
+ raise ValueError(
+ f"input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match"
+ )
+
+ # transformer inputs
+ transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1)
+
+ return transformer_inputs, loc, scale, static_feat
+
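+ # Illustrative sketch (not part of the library), assuming the default mean scaler: for a fully
+ # observed univariate context [2.0, 4.0, 6.0] the scaler returns loc = 0 and scale = mean(|x|) = 4,
+ # so the values fed to the transformer are (x - loc) / scale = [0.5, 1.0, 1.5], while
+ # log1p(|loc|) = 0.0 and log(scale) ~= 1.386 are appended to the static real features above.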
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import TimeSeriesTransformerModel
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_inputs, loc, scale, static_feat = self.create_network_inputs(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ )
+
+ if encoder_outputs is None:
+ enc_input = transformer_inputs[:, : self.config.context_length, ...]
+ encoder_outputs = self.encoder(
+ inputs_embeds=enc_input,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ dec_input = transformer_inputs[:, self.config.context_length :, ...]
+ decoder_outputs = self.decoder(
+ inputs_embeds=dec_input,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
+
+ return Seq2SeqTSModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ loc=loc,
+ scale=scale,
+ static_features=static_feat,
+ )
+
+
+@add_start_docstrings(
+ "The Time Series Transformer Model with a distribution head on top for time-series forecasting.",
+ TIME_SERIES_TRANSFORMER_START_DOCSTRING,
+)
+class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel):
+ def __init__(self, config: TimeSeriesTransformerConfig):
+ super().__init__(config)
+ self.model = TimeSeriesTransformerModel(config)
+ if config.distribution_output == "student_t":
+ self.distribution_output = StudentTOutput(dim=config.input_size)
+ elif config.distribution_output == "normal":
+ self.distribution_output = NormalOutput(dim=config.input_size)
+ elif config.distribution_output == "negative_binomial":
+ self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
+ else:
+ raise ValueError(f"Unknown distribution output {config.distribution_output}")
+
+ self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
+ self.target_shape = self.distribution_output.event_shape
+
+ if config.loss == "nll":
+ self.loss = nll
+ else:
+ raise ValueError(f"Unknown loss function {config.loss}")
+
+ # Initialize weights of distribution_output and apply final processing
+ self.post_init()
+
+ def output_params(self, dec_output):
+ return self.parameter_projection(dec_output)
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ @torch.jit.ignore
+ def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
+ sliced_params = params
+ if trailing_n is not None:
+ sliced_params = [p[:, -trailing_n:] for p in params]
+ return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
+
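+ # Illustrative sketch (not part of the library): `params` holds the projected distribution
+ # parameters, each of shape (batch_size, sequence_length, ...). Passing `trailing_n=1` keeps only
+ # the last time step of every parameter before the distribution is built:
+ #
+ #     distr = model.output_distribution(params, loc=loc, scale=scale, trailing_n=1)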
+ @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ future_observed_mask: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import TimeSeriesTransformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = TimeSeriesTransformerForPrediction.from_pretrained(
+ ... "huggingface/time-series-transformer-tourism-monthly"
+ ... )
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # during inference, one only provides past values
+ >>> # as well as possible additional features
+ >>> # the model autoregressively generates future values
+ >>> outputs = model.generate(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> mean_prediction = outputs.sequences.mean(dim=1)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if future_values is not None:
+ use_cache = False
+
+ outputs = self.model(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ prediction_loss = None
+ params = None
+ if future_values is not None:
+ params = self.output_params(outputs[0]) # outputs.last_hidden_state
+ # loc is 3rd last and scale is 2nd last output
+ distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
+
+ loss = self.loss(distribution, future_values)
+
+ if future_observed_mask is None:
+ future_observed_mask = torch.ones_like(future_values)
+
+ if len(self.target_shape) == 0:
+ loss_weights = future_observed_mask
+ else:
+ loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
+
+ prediction_loss = weighted_average(loss, weights=loss_weights)
+
+ if not return_dict:
+ outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:]
+ return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
+
+ return Seq2SeqTSPredictionOutput(
+ loss=prediction_loss,
+ params=params,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loc=outputs.loc,
+ scale=outputs.scale,
+ static_features=outputs.static_features,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ future_time_features: torch.Tensor,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> SampleTSPredictionOutput:
+ r"""
+ Greedily generate sequences of sample predictions from a model with a probability distribution head.
+
+ Parameters:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, which serve as context in order to predict the future. The sequence size
+ of this tensor must be larger than the `context_length` of the model, since the model will use the
+ larger size to construct lag features, i.e. additional values from the past which are added in order to
+ serve as "extra context".
+
+ The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which, if
+ no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
+ look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
+ of the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features,
+ such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+ Missing values should be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
+ of variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things
+ like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
+ These could also be so-called "age" features, which basically help the model know "at which point in
+ life" a time-series is. Age features have small values for distant past time steps and increase
+ monotonically the more we approach the current time step. Holiday features are also a good example of
+ time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires to provide additional time features. The Time Series Transformer only
+ learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to sampled
+ predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
+ (for instance as Fourier features). These could also be so-called "age" features, which basically help
+ the model know "at which point in life" a time-series is. Age features have small values for distant
+ past time steps and increase monotonically the more we approach the current time step. Holiday features
+ are also a good example of time features.
+
+ These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+ where the position encodings are learned from scratch internally as parameters of the model, the Time
+ Series Transformer requires to provide additional time features. The Time Series Transformer only
+ learns additional embeddings for `static_categorical_features`.
+
+ Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+ features must be known at prediction time.
+
+ The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to
+ the values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over
+ time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers.
+
+ Return:
+ [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
+ samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
+ multivariate predictions.
+ """
+ outputs = self(
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ past_time_features=past_time_features,
+ past_values=past_values,
+ past_observed_mask=past_observed_mask,
+ future_time_features=future_time_features,
+ future_values=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ use_cache=True,
+ )
+
+ decoder = self.model.get_decoder()
+ enc_last_hidden = outputs.encoder_last_hidden_state
+ loc = outputs.loc
+ scale = outputs.scale
+ static_feat = outputs.static_features
+
+ num_parallel_samples = self.config.num_parallel_samples
+ repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
+ repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_past_values = (
+ past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
+ ) / repeated_scale
+
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
+ features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
+ repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ future_samples = []
+
+ # greedy decoding
+ for k in range(self.config.prediction_length):
+ lagged_sequence = self.model.get_lagged_subsequences(
+ sequence=repeated_past_values,
+ subsequences_length=1 + k,
+ shift=1,
+ )
+
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+ decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1)
+
+ dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
+ dec_last_hidden = dec_output.last_hidden_state
+
+ params = self.parameter_projection(dec_last_hidden[:, -1:])
+ distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
+ next_sample = distr.sample()
+
+ repeated_past_values = torch.cat(
+ (repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1
+ )
+ future_samples.append(next_sample)
+
+ concat_future_samples = torch.cat(future_samples, dim=1)
+
+ return SampleTSPredictionOutput(
+ sequences=concat_future_samples.reshape(
+ (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
+ )
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vilt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vilt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5afba10dacfcdd5691c42b4d56b0aeed92d78b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vilt/__init__.py
@@ -0,0 +1,85 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_vilt"] = ["ViltFeatureExtractor"]
+ _import_structure["image_processing_vilt"] = ["ViltImageProcessor"]
+ _import_structure["processing_vilt"] = ["ViltProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_vilt"] = [
+ "VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ViltForImageAndTextRetrieval",
+ "ViltForImagesAndTextClassification",
+ "ViltForTokenClassification",
+ "ViltForMaskedLM",
+ "ViltForQuestionAnswering",
+ "ViltLayer",
+ "ViltModel",
+ "ViltPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_vilt import ViltFeatureExtractor
+ from .image_processing_vilt import ViltImageProcessor
+ from .processing_vilt import ViltProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_vilt import (
+ VILT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ViltForImageAndTextRetrieval,
+ ViltForImagesAndTextClassification,
+ ViltForMaskedLM,
+ ViltForQuestionAnswering,
+ ViltForTokenClassification,
+ ViltLayer,
+ ViltModel,
+ ViltPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
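+# Illustrative note (not part of the library): because the module is replaced by a `_LazyModule`,
+# torch-dependent classes are only imported when first accessed, e.g.
+#
+#     from transformers.models.vilt import ViltConfig  # no torch import triggered yet
+#     from transformers.models.vilt import ViltModel   # triggers the import of modeling_vilt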
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py b/venv/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ad4bde69494d77b9d43c0f8f2480d2be24a3d6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py
@@ -0,0 +1,147 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VilT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class ViltConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the ViLT
+ [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the text part of the model. Defines the number of different tokens that can be
+ represented by the `inputs_ids` passed when calling [`ViltModel`].
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding
+ text.
+ modality_type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the
+ embeddings of the text and image modalities.
+ max_position_embeddings (`int`, *optional*, defaults to 40):
+ The maximum sequence length that this model might ever be used with.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 384):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 32):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ max_image_length (`int`, *optional*, defaults to -1):
+ The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer,
+ the encoder will sample at most `max_image_length` patches. If set to -1, the number of patches is not
+ limited.
+ num_images (`int`, *optional*, defaults to -1):
+ The number of images to use for natural language visual reasoning. If set to a positive integer, will be
+ used by [`ViltForImagesAndTextClassification`] for defining the classifier head.
+
+ Example:
+
+ ```python
+ >>> from transformers import ViltModel, ViltConfig
+
+ >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration
+ >>> configuration = ViltConfig()
+
+ >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration
+ >>> model = ViltModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vilt"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ type_vocab_size=2,
+ modality_type_vocab_size=2,
+ max_position_embeddings=40,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ image_size=384,
+ patch_size=32,
+ num_channels=3,
+ qkv_bias=True,
+ max_image_length=-1,
+ tie_word_embeddings=False,
+ num_images=-1,
+ **kwargs,
+ ):
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.type_vocab_size = type_vocab_size
+ self.modality_type_vocab_size = modality_type_vocab_size
+ self.max_position_embeddings = max_position_embeddings
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.max_image_length = max_image_length
+ self.num_images = num_images
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..e597d0d7e778b7e0fff61e5c1eec83996170b2e1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ViLT checkpoints from the original Github repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+ BertTokenizer,
+ ViltConfig,
+ ViltForImageAndTextRetrieval,
+ ViltForImagesAndTextClassification,
+ ViltForMaskedLM,
+ ViltForQuestionAnswering,
+ ViltImageProcessor,
+ ViltProcessor,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
+ )
+ rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append(
+ (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
+ )
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))
+
+ # embeddings
+ rename_keys.extend(
+ [
+ # text embeddings
+ ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
+ (
+ "text_embeddings.position_embeddings.weight",
+ "vilt.embeddings.text_embeddings.position_embeddings.weight",
+ ),
+ ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
+ (
+ "text_embeddings.token_type_embeddings.weight",
+ "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
+ ),
+ ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
+ ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
+ # patch embeddings
+ ("transformer.cls_token", "vilt.embeddings.cls_token"),
+ ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
+ ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
+ ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
+ # token type embeddings
+ ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
+ ]
+ )
+
+ # final layernorm + pooler
+ rename_keys.extend(
+ [
+ ("transformer.norm.weight", "vilt.layernorm.weight"),
+ ("transformer.norm.bias", "vilt.layernorm.bias"),
+ ("pooler.dense.weight", "vilt.pooler.dense.weight"),
+ ("pooler.dense.bias", "vilt.pooler.dense.bias"),
+ ]
+ )
+
+ # classifier head(s)
+ if vqa_model:
+ # classification head
+ rename_keys.extend(
+ [
+ ("vqa_classifier.0.weight", "classifier.0.weight"),
+ ("vqa_classifier.0.bias", "classifier.0.bias"),
+ ("vqa_classifier.1.weight", "classifier.1.weight"),
+ ("vqa_classifier.1.bias", "classifier.1.bias"),
+ ("vqa_classifier.3.weight", "classifier.3.weight"),
+ ("vqa_classifier.3.bias", "classifier.3.bias"),
+ ]
+ )
+ elif nlvr_model:
+ # classification head
+ rename_keys.extend(
+ [
+ ("nlvr2_classifier.0.weight", "classifier.0.weight"),
+ ("nlvr2_classifier.0.bias", "classifier.0.bias"),
+ ("nlvr2_classifier.1.weight", "classifier.1.weight"),
+ ("nlvr2_classifier.1.bias", "classifier.1.bias"),
+ ("nlvr2_classifier.3.weight", "classifier.3.weight"),
+ ("nlvr2_classifier.3.bias", "classifier.3.bias"),
+ ]
+ )
+ else:
+ pass
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config):
+ for i in range(config.num_hidden_layers):
+ prefix = "vilt."
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
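+# Illustrative sketch (not part of the conversion script): for the base configuration with
+# hidden_size = 768, each fused `attn.qkv.weight` has shape (2304, 768) and is split row-wise
+# into three (768, 768) matrices: rows [0:768] become the query, rows [768:1536] the key and
+# rows [1536:2304] the value projection. The fused (2304,) bias is split the same way.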
+
+def remove_classification_head_(state_dict):
+ ignore_keys = ["head.weight", "head.bias"]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+@torch.no_grad()
+def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our ViLT structure.
+ """
+
+ # define configuration and initialize HuggingFace model
+ config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
+ mlm_model = False
+ vqa_model = False
+ nlvr_model = False
+ irtr_model = False
+ if "vqa" in checkpoint_url:
+ vqa_model = True
+ config.num_labels = 3129
+ repo_id = "huggingface/label-files"
+ filename = "vqa2-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ model = ViltForQuestionAnswering(config)
+ elif "nlvr" in checkpoint_url:
+ nlvr_model = True
+ config.num_labels = 2
+ config.id2label = {0: "False", 1: "True"}
+ config.label2id = {v: k for k, v in config.id2label.items()}
+ config.modality_type_vocab_size = 3
+ model = ViltForImagesAndTextClassification(config)
+ elif "irtr" in checkpoint_url:
+ irtr_model = True
+ model = ViltForImageAndTextRetrieval(config)
+ elif "mlm_itm" in checkpoint_url:
+ mlm_model = True
+ model = ViltForMaskedLM(config)
+ else:
+ raise ValueError("Unknown model type")
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
+ rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config)
+ if mlm_model or irtr_model:
+ ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+ # load state dict into HuggingFace model
+ model.eval()
+ if mlm_model:
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
+ assert missing_keys == ["mlm_score.decoder.bias"]
+ else:
+ model.load_state_dict(state_dict)
+
+ # Define processor
+ image_processor = ViltImageProcessor(size=384)
+ tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ processor = ViltProcessor(image_processor, tokenizer)
+
+ # Forward pass on example inputs (image + text)
+ if nlvr_model:
+ image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
+ image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
+ text = (
+ "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
+ " standing."
+ )
+ encoding_1 = processor(image1, text, return_tensors="pt")
+ encoding_2 = processor(image2, text, return_tensors="pt")
+ outputs = model(
+ input_ids=encoding_1.input_ids,
+ pixel_values=encoding_1.pixel_values,
+ pixel_values_2=encoding_2.pixel_values,
+ )
+ else:
+ image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
+ if mlm_model:
+ text = "a bunch of [MASK] laying on a [MASK]."
+ else:
+ text = "How many cats are there?"
+ encoding = processor(image, text, return_tensors="pt")
+ outputs = model(**encoding)
+
+ # Verify outputs
+ if mlm_model:
+ expected_shape = torch.Size([1, 11, 30522])
+ expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
+
+ # verify masked token prediction equals "cats"
+ predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
+ assert tokenizer.decode([predicted_id]) == "cats"
+ elif vqa_model:
+ expected_shape = torch.Size([1, 3129])
+ expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
+
+ # verify vqa prediction equals "2"
+ predicted_idx = outputs.logits.argmax(-1).item()
+ assert model.config.id2label[predicted_idx] == "2"
+ elif nlvr_model:
+ expected_shape = torch.Size([1, 2])
+ expected_slice = torch.tensor([-2.8721, 2.1291])
+ assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
+ assert outputs.logits.shape == expected_shape
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+
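+# Example invocation (illustrative; the output directory is a placeholder, the URL is the default below):
+#
+#     python convert_vilt_original_to_pytorch.py \
+#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
+#         --pytorch_dump_folder_path ./vilt-b32-mlm-itm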
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
+ type=str,
+ help="URL of the checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py b/venv/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ccb884ea00c9d1b9df3322281083ddf166e5dc9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for ViLT.
+"""
+
+import warnings
+from typing import List, Optional, Union
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class ViltProcessor(ProcessorMixin):
+ r"""
+ Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.
+
+ [`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the
+ docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`ViltImageProcessor`, *optional*):
+ An instance of [`ViltImageProcessor`]. The image processor is a required input.
+ tokenizer (`BertTokenizerFast`, *optional*):
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "ViltImageProcessor"
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(
+ self,
+ images,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ This method uses [`ViltImageProcessor.__call__`] method to prepare image(s) for the model, and
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ # add pixel_values + pixel_mask
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
+ encoding.update(encoding_image_processor)
+
+ return encoding
+
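+ # Illustrative usage sketch (not part of the library); the checkpoint name is just an example
+ # and `image` stands for any PIL image:
+ #
+ #     from transformers import ViltProcessor
+ #     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
+ #     encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
+ #     # `encoding` combines the tokenizer outputs (input_ids, token_type_ids, attention_mask)
+ #     # with the image processor outputs (pixel_values, pixel_mask).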
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+ @property
+ def feature_extractor(self):
+ warnings.warn(
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+ FutureWarning,
+ )
+ return self.image_processor